Improve dry run for topology changes to dry run server side apply
Co-authored-by: fabriziopandini <fpandini@vmware.com>
chrischdi and fabriziopandini committed Jul 6, 2022
1 parent eba218f commit 17536fd
Showing 15 changed files with 504 additions and 1,209 deletions.
9 changes: 9 additions & 0 deletions api/v1beta1/common_types.go
@@ -117,6 +117,15 @@ const (
 	// An external controller must fulfill the contract of the InfraCluster resource.
 	// External infrastructure providers should ensure that the annotation, once set, cannot be removed.
 	ManagedByAnnotation = "cluster.x-k8s.io/managed-by"
+
+	// TopologyDryRunAnnotation is an annotation that gets set on objects by the topology controller
+	// only during a server side dry run apply operation. It is used for validating
+	// update webhooks for objects which get updated by template rotation (e.g. InfrastructureMachineTemplate).
+	// When the annotation is set and the admission request is a dry run, the webhook should
+	// skip validation due to immutability. That way the request will succeed (without
+	// any changes to the actual object because it is a dry run) and the topology controller
+	// will receive the resulting object.
+	TopologyDryRunAnnotation = "topology.cluster.x-k8s.io/dry-run"
 )

 const (
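
For infrastructure providers wiring this annotation into their update webhooks, a minimal sketch could look like the following. This is illustrative only, not code from this commit: the FooMachineTemplate validator is hypothetical, and only the annotation key (clusterv1.TopologyDryRunAnnotation) comes from the change above. It assumes a controller-runtime setup where the admission request is retrievable via admission.RequestFromContext.

package webhooks

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// FooMachineTemplateValidator is a hypothetical validating webhook for an
// immutable infrastructure machine template.
type FooMachineTemplateValidator struct{}

// ValidateUpdate enforces immutability of the template, except when the
// topology controller performs a server side dry run apply.
func (v *FooMachineTemplateValidator) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) error {
	newTemplate, ok := newObj.(*unstructured.Unstructured)
	if !ok {
		return apierrors.NewBadRequest("expected an Unstructured object")
	}

	req, err := admission.RequestFromContext(ctx)
	if err != nil {
		return err
	}

	// Skip the immutability check when this is a dry run marked by the
	// topology controller: nothing is persisted, and skipping the check
	// lets the controller read back the merged object.
	dryRun := req.DryRun != nil && *req.DryRun
	_, annotationSet := newTemplate.GetAnnotations()[clusterv1.TopologyDryRunAnnotation]
	if dryRun && annotationSet {
		return nil
	}

	// ... regular immutability validation of the template spec goes here ...
	return nil
}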
38 changes: 0 additions & 38 deletions cmd/clusterctl/client/cluster/mover.go
@@ -17,7 +17,6 @@ limitations under the License.
 package cluster

 import (
-	"context"
 	"fmt"
 	"os"
 	"path/filepath"
@@ -35,7 +34,6 @@ import (

 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
-	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 	"sigs.k8s.io/cluster-api/util/annotations"
 	"sigs.k8s.io/cluster-api/util/conditions"
 	"sigs.k8s.io/cluster-api/util/patch"
@@ -853,9 +851,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
 	// Rebuild the owner reference chain
 	o.buildOwnerChain(obj, nodeToCreate)

-	// Save the old managed fields for topology managed fields migration
-	oldManagedFields := obj.GetManagedFields()
-
 	// FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed.
 	// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
 	if len(obj.GetName()) > 0 && len(obj.GetGenerateName()) > 0 {
@@ -902,10 +897,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
 	// Stores the newUID assigned to the newly created object.
 	nodeToCreate.newUID = obj.GetUID()

-	if err := patchTopologyManagedFields(ctx, oldManagedFields, obj, cTo); err != nil {
-		return err
-	}
-
 	return nil
 }

@@ -1173,32 +1164,3 @@ func (o *objectMover) checkTargetProviders(toInventory InventoryClient) error

 	return kerrors.NewAggregate(errList)
 }
-
-// patchTopologyManagedFields patches the managed fields of obj if parts of it are owned by the topology controller.
-// This is necessary to ensure the managed fields created by the topology controller are still present and thus to
-// prevent unnecessary machine rollouts. Without patching the managed fields, clusterctl would be the owner of the fields,
-// which would lead to co-ownership and also additional machine rollouts.
-func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.ManagedFieldsEntry, obj *unstructured.Unstructured, cTo client.Client) error {
-	var containsTopologyManagedFields bool
-	// Check if the object was owned by the topology controller.
-	for _, m := range oldManagedFields {
-		if m.Operation == metav1.ManagedFieldsOperationApply &&
-			m.Manager == structuredmerge.TopologyManagerName &&
-			m.Subresource == "" {
-			containsTopologyManagedFields = true
-			break
-		}
-	}
-	// Return early if the object was not owned by the topology controller.
-	if !containsTopologyManagedFields {
-		return nil
-	}
-	base := obj.DeepCopy()
-	obj.SetManagedFields(oldManagedFields)
-
-	if err := cTo.Patch(ctx, obj, client.MergeFrom(base)); err != nil {
-		return errors.Wrapf(err, "error patching managed fields %q %s/%s",
-			obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
-	}
-	return nil
-}
6 changes: 3 additions & 3 deletions internal/controllers/topology/cluster/cluster_controller.go
@@ -362,14 +362,14 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu

 // serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
 func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
-	return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
-		return structuredmerge.NewServerSidePatchHelper(original, modified, c, opts...)
+	return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
+		return structuredmerge.NewServerSidePatchHelper(ctx, original, modified, c, opts...)
 	}
 }

 // dryRunPatchHelperFactory makes use of a two-way patch and is used in situations where we cannot rely on managed fields.
 func dryRunPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
-	return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
+	return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
 		return structuredmerge.NewTwoWaysPatchHelper(original, modified, c, opts...)
 	}
 }
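
Both factories above now receive the context and hand it down, which implies the PatchHelperFactoryFunc type itself gained a context parameter. The type definition is not part of this excerpt; the following is a sketch of its assumed shape, inferred from the call sites in this diff:

// Assumed shape of the updated type in the structuredmerge package: the
// context is threaded through so that NewServerSidePatchHelper can issue
// the server side dry run apply against the API server.
type PatchHelperFactoryFunc func(ctx context.Context, original, modified client.Object, opts ...HelperOption) (PatchHelper, error)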
22 changes: 11 additions & 11 deletions internal/controllers/topology/cluster/reconcile_state.go
@@ -101,7 +101,7 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e
 	// Note: we are using Patch instead of create for ensuring consistency in managedFields for the entire controller
 	// but in this case it isn't strictly necessary given that we are not using server side apply for modifying the
 	// object afterwards.
-	helper, err := r.patchHelperFactory(nil, shim)
+	helper, err := r.patchHelperFactory(ctx, nil, shim)
 	if err != nil {
 		return errors.Wrap(err, "failed to create the patch helper for the cluster shim object")
 	}
@@ -384,7 +384,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
 	// If a current MachineHealthCheck doesn't exist but there is a desired MachineHealthCheck, attempt to create it.
 	if current == nil && desired != nil {
 		log.Infof("Creating %s", tlog.KObj{Obj: desired})
-		helper, err := r.patchHelperFactory(nil, desired)
+		helper, err := r.patchHelperFactory(ctx, nil, desired)
 		if err != nil {
 			return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: desired})
 		}
@@ -413,7 +413,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
 	// Check differences between current and desired MachineHealthChecks, and patch if required.
 	// NOTE: we want to be authoritative on the entire spec because the users are
 	// expected to change MHC fields from the ClusterClass only.
-	patchHelper, err := r.patchHelperFactory(current, desired)
+	patchHelper, err := r.patchHelperFactory(ctx, current, desired)
 	if err != nil {
 		return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: current})
 	}
@@ -438,7 +438,7 @@ func (r *Reconciler) reconcileCluster(ctx context.Context, s *scope.Scope) error
 	ctx, log := tlog.LoggerFrom(ctx).WithObject(s.Desired.Cluster).Into(ctx)

 	// Check differences between current and desired state, and eventually patch the current object.
-	patchHelper, err := r.patchHelperFactory(s.Current.Cluster, s.Desired.Cluster)
+	patchHelper, err := r.patchHelperFactory(ctx, s.Current.Cluster, s.Desired.Cluster)
 	if err != nil {
 		return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: s.Current.Cluster})
 	}
@@ -508,7 +508,7 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, cluster *clust

 	log = log.WithObject(md.Object)
 	log.Infof(fmt.Sprintf("Creating %s", tlog.KObj{Obj: md.Object}))
-	helper, err := r.patchHelperFactory(nil, md.Object)
+	helper, err := r.patchHelperFactory(ctx, nil, md.Object)
 	if err != nil {
 		return createErrorWithoutObjectName(err, md.Object)
 	}
@@ -563,7 +563,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, cluster *clust

 	// Check differences between current and desired MachineDeployment, and eventually patch the current object.
 	log = log.WithObject(desiredMD.Object)
-	patchHelper, err := r.patchHelperFactory(currentMD.Object, desiredMD.Object)
+	patchHelper, err := r.patchHelperFactory(ctx, currentMD.Object, desiredMD.Object)
 	if err != nil {
 		return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: currentMD.Object})
 	}
@@ -656,7 +656,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile
 	// If there is no current object, create it.
 	if in.current == nil {
 		log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
-		helper, err := r.patchHelperFactory(nil, in.desired)
+		helper, err := r.patchHelperFactory(ctx, nil, in.desired)
 		if err != nil {
 			return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
 		}
@@ -673,7 +673,7 @@ }
 	}

 	// Check differences between current and desired state, and eventually patch the current object.
-	patchHelper, err := r.patchHelperFactory(in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
+	patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
 	if err != nil {
 		return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
 	}
@@ -733,7 +733,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
 	// If there is no current object, create the desired object.
 	if in.current == nil {
 		log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
-		helper, err := r.patchHelperFactory(nil, in.desired)
+		helper, err := r.patchHelperFactory(ctx, nil, in.desired)
 		if err != nil {
 			return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
 		}
@@ -754,7 +754,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
 	}

 	// Check differences between current and desired objects, and if there are changes eventually start the template rotation.
-	patchHelper, err := r.patchHelperFactory(in.current, in.desired)
+	patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired)
 	if err != nil {
 		return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
 	}
@@ -785,7 +785,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci

 	log.Infof("Rotating %s, new name %s", tlog.KObj{Obj: in.current}, newName)
 	log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
-	helper, err := r.patchHelperFactory(nil, in.desired)
+	helper, err := r.patchHelperFactory(ctx, nil, in.desired)
 	if err != nil {
 		return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
 	}
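
All eleven call sites in this file follow the same convention: passing nil as the original object tells the helper to create the desired object, while passing the current object tells it to compute and apply a patch. Illustratively:

// Create: there is no current object yet, so original is nil.
helper, err := r.patchHelperFactory(ctx, nil, desired)

// Update: compute the changes needed to move current towards desired.
patchHelper, err := r.patchHelperFactory(ctx, current, desired)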