Skip to content

Commit

Permalink
Improve dry run for topology changes to dry run server side apply
Browse files Browse the repository at this point in the history
Co-authored-by: fabriziopandini <fpandini@vmware.com>
  • Loading branch information
chrischdi and fabriziopandini committed Jun 23, 2022
1 parent a0f3874 commit ae40566
Show file tree
Hide file tree
Showing 8 changed files with 177 additions and 1,280 deletions.
38 changes: 0 additions & 38 deletions cmd/clusterctl/client/cluster/mover.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ limitations under the License.
package cluster

import (
"context"
"fmt"
"os"
"path/filepath"
Expand All @@ -35,7 +34,6 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
Expand Down Expand Up @@ -853,9 +851,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
// Rebuild the owner reference chain
o.buildOwnerChain(obj, nodeToCreate)

// Save the old managed fields for topology managed fields migration
oldManagedFields := obj.GetManagedFields()

// FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed.
// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
if len(obj.GetName()) > 0 && len(obj.GetGenerateName()) > 0 {
Expand Down Expand Up @@ -902,10 +897,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
// Stores the newUID assigned to the newly created object.
nodeToCreate.newUID = obj.GetUID()

if err := patchTopologyManagedFields(ctx, oldManagedFields, obj, cTo); err != nil {
return err
}

return nil
}

Expand Down Expand Up @@ -1173,32 +1164,3 @@ func (o *objectMover) checkTargetProviders(toInventory InventoryClient) error {

return kerrors.NewAggregate(errList)
}

// patchTopologyManagedFields patches the managed fields of obj if parts of it are owned by the topology controller.
// This is necessary to ensure the managed fields created by the topology controller are still present and thus to
// prevent unnecessary machine rollouts. Without patching the managed fields, clusterctl would be the owner of the fields
// which would lead to co-ownership and also additional machine rollouts.
func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.ManagedFieldsEntry, obj *unstructured.Unstructured, cTo client.Client) error {
	// Determine whether any of the previous managed-field entries were applied
	// by the topology controller on the main resource (i.e. not on a subresource).
	ownedByTopology := false
	for _, entry := range oldManagedFields {
		if entry.Operation == metav1.ManagedFieldsOperationApply &&
			entry.Manager == structuredmerge.TopologyManagerName &&
			entry.Subresource == "" {
			ownedByTopology = true
			break
		}
	}

	// Nothing owned by the topology controller; no patch required.
	if !ownedByTopology {
		return nil
	}

	// Restore the previous managed fields on obj and patch it against an
	// unmodified deep copy so only the managedFields change is sent.
	base := obj.DeepCopy()
	obj.SetManagedFields(oldManagedFields)

	if err := cTo.Patch(ctx, obj, client.MergeFrom(base)); err != nil {
		return errors.Wrapf(err, "error patching managed fields %q %s/%s",
			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
	}
	return nil
}
6 changes: 3 additions & 3 deletions internal/controllers/topology/cluster/cluster_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -339,14 +339,14 @@ func (r *Reconciler) machineDeploymentToCluster(o client.Object) []ctrl.Request

// serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return structuredmerge.NewServerSidePatchHelper(original, modified, c, opts...)
return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return structuredmerge.NewServerSidePatchHelper(ctx, original, modified, c, opts...)
}
}

// dryRunPatchHelperFactory makes use of a two-ways patch and is used in situations where we cannot rely on managed fields.
func dryRunPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return structuredmerge.NewTwoWaysPatchHelper(original, modified, c, opts...)
}
}
22 changes: 11 additions & 11 deletions internal/controllers/topology/cluster/reconcile_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e
// Note: we are using Patch instead of create for ensuring consistency in managedFields for the entire controller
// but in this case it isn't strictly necessary given that we are not using server side apply for modifying the
// object afterwards.
helper, err := r.patchHelperFactory(nil, shim)
helper, err := r.patchHelperFactory(ctx, nil, shim)
if err != nil {
return errors.Wrap(err, "failed to create the patch helper for the cluster shim object")
}
Expand Down Expand Up @@ -385,7 +385,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
// If a current MachineHealthCheck doesn't exist but there is a desired MachineHealthCheck attempt to create.
if current == nil && desired != nil {
log.Infof("Creating %s", tlog.KObj{Obj: desired})
helper, err := r.patchHelperFactory(nil, desired)
helper, err := r.patchHelperFactory(ctx, nil, desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: desired})
}
Expand Down Expand Up @@ -414,7 +414,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
// Check differences between current and desired MachineHealthChecks, and patch if required.
// NOTE: we want to be authoritative on the entire spec because the users are
// expected to change MHC fields from the ClusterClass only.
patchHelper, err := r.patchHelperFactory(current, desired)
patchHelper, err := r.patchHelperFactory(ctx, current, desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: current})
}
Expand All @@ -439,7 +439,7 @@ func (r *Reconciler) reconcileCluster(ctx context.Context, s *scope.Scope) error
ctx, log := tlog.LoggerFrom(ctx).WithObject(s.Desired.Cluster).Into(ctx)

// Check differences between current and desired state, and eventually patch the current object.
patchHelper, err := r.patchHelperFactory(s.Current.Cluster, s.Desired.Cluster, structuredmerge.IgnorePaths{
patchHelper, err := r.patchHelperFactory(ctx, s.Current.Cluster, s.Desired.Cluster, structuredmerge.IgnorePaths{
{"spec", "controlPlaneEndpoint"}, // this is a well known field that is managed by the Cluster controller, topology should not express opinions on it.
})
if err != nil {
Expand Down Expand Up @@ -511,7 +511,7 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, cluster *clust

log = log.WithObject(md.Object)
log.Infof(fmt.Sprintf("Creating %s", tlog.KObj{Obj: md.Object}))
helper, err := r.patchHelperFactory(nil, md.Object)
helper, err := r.patchHelperFactory(ctx, nil, md.Object)
if err != nil {
return createErrorWithoutObjectName(err, md.Object)
}
Expand Down Expand Up @@ -566,7 +566,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, cluster *clust

// Check differences between current and desired MachineDeployment, and eventually patch the current object.
log = log.WithObject(desiredMD.Object)
patchHelper, err := r.patchHelperFactory(currentMD.Object, desiredMD.Object)
patchHelper, err := r.patchHelperFactory(ctx, currentMD.Object, desiredMD.Object)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: currentMD.Object})
}
Expand Down Expand Up @@ -659,7 +659,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile
// If there is no current object, create it.
if in.current == nil {
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
helper, err := r.patchHelperFactory(nil, in.desired)
helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
Expand All @@ -676,7 +676,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile
}

// Check differences between current and desired state, and eventually patch the current object.
patchHelper, err := r.patchHelperFactory(in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
}
Expand Down Expand Up @@ -736,7 +736,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
// If there is no current object, create the desired object.
if in.current == nil {
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
helper, err := r.patchHelperFactory(nil, in.desired)
helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
Expand All @@ -757,7 +757,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
}

// Check differences between current and desired objects, and if there are changes eventually start the template rotation.
patchHelper, err := r.patchHelperFactory(in.current, in.desired)
patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
}
Expand Down Expand Up @@ -788,7 +788,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci

log.Infof("Rotating %s, new name %s", tlog.KObj{Obj: in.current}, newName)
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
helper, err := r.patchHelperFactory(nil, in.desired)
helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
Expand Down
Loading

0 comments on commit ae40566

Please sign in to comment.