Merge pull request #6861 from k8s-infra-cherrypick-robot/cherry-pick-6710-to-release-1.2

[release-1.2] 🌱 Improve dry run for topology changes to dry run server side apply
k8s-ci-robot committed Jul 7, 2022
2 parents c3402f0 + 5d5e716 commit f7d01d7
Showing 17 changed files with 685 additions and 1,224 deletions.
9 changes: 9 additions & 0 deletions api/v1beta1/common_types.go
@@ -117,6 +117,15 @@ const (
// An external controller must fulfill the contract of the InfraCluster resource.
// External infrastructure providers should ensure that the annotation, once set, cannot be removed.
ManagedByAnnotation = "cluster.x-k8s.io/managed-by"

// TopologyDryRunAnnotation is an annotation that gets set on objects by the topology controller
// only during a server side dry run apply operation. It is used for validating
// update webhooks for objects which get updated by template rotation (e.g. InfrastructureMachineTemplate).
// When the annotation is set and the admission request is a dry run, the webhook should
// skip its immutability check instead of denying the request. That way the request will
// succeed (without any changes to the actual object because it is a dry run) and the
// topology controller will receive the resulting object.
TopologyDryRunAnnotation = "topology.cluster.x-k8s.io/dry-run"
)

const (
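The annotation above establishes a contract between the topology controller and provider webhooks: the controller sets it only while performing a server-side dry-run apply, and webhooks relax their immutability validation for exactly those requests. As a rough sketch of the controller side (illustrative only, not code from this commit; the helper name, the empty annotation value, and the `capi-topology` field manager are assumptions), a dry-run preview could look like:

```go
// Illustrative sketch only, not code from this commit: how a controller could set
// the dry-run annotation and preview the outcome of an apply via a server-side
// dry-run request.
package example

import (
	"context"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/controller-runtime/pkg/client"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// previewWithDryRun marks obj with the topology dry-run annotation and issues a
// server-side apply with DryRunAll: the request passes through admission (where
// webhooks can detect the annotation and skip immutability checks) and the API
// server returns the merged object without persisting anything.
func previewWithDryRun(ctx context.Context, c client.Client, obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
	obj = obj.DeepCopy()

	annotations := obj.GetAnnotations()
	if annotations == nil {
		annotations = map[string]string{}
	}
	annotations[clusterv1.TopologyDryRunAnnotation] = "" // empty value is an assumption
	obj.SetAnnotations(annotations)

	err := c.Patch(ctx, obj, client.Apply,
		client.DryRunAll,
		client.FieldOwner("capi-topology"), // assumed field manager name
		client.ForceOwnership,
	)
	// On success, obj now holds the dry-run result returned by the API server.
	return obj, err
}
```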
38 changes: 0 additions & 38 deletions cmd/clusterctl/client/cluster/mover.go
@@ -17,7 +17,6 @@ limitations under the License.
package cluster

import (
"context"
"fmt"
"os"
"path/filepath"
@@ -35,7 +34,6 @@ import (

clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
logf "sigs.k8s.io/cluster-api/cmd/clusterctl/log"
"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
@@ -853,9 +851,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
// Rebuild the owner reference chain
o.buildOwnerChain(obj, nodeToCreate)

// Save the old managed fields for topology managed fields migration
oldManagedFields := obj.GetManagedFields()

// FIXME Workaround for https://github.com/kubernetes/kubernetes/issues/32220. Remove when the issue is fixed.
// If the resource already exists, the API server ordinarily returns an AlreadyExists error. Due to the above issue, if the resource has a non-empty metadata.generateName field, the API server returns a ServerTimeoutError. To ensure that the API server returns an AlreadyExists error, we set the metadata.generateName field to an empty string.
if len(obj.GetName()) > 0 && len(obj.GetGenerateName()) > 0 {
@@ -902,10 +897,6 @@ func (o *objectMover) createTargetObject(nodeToCreate *node, toProxy Proxy) erro
// Stores the newUID assigned to the newly created object.
nodeToCreate.newUID = obj.GetUID()

if err := patchTopologyManagedFields(ctx, oldManagedFields, obj, cTo); err != nil {
return err
}

return nil
}

@@ -1173,32 +1164,3 @@ func (o *objectMover) checkTargetProviders(toInventory InventoryClient) error {

return kerrors.NewAggregate(errList)
}

// patchTopologyManagedFields patches the managed fields of obj if parts of it are owned by the topology controller.
// This is necessary to ensure the managed fields created by the topology controller are still present and thus to
// prevent unnecessary machine rollouts. Without patching the managed fields, clusterctl would be the owner of the fields
// which would lead to co-ownership and also additional machine rollouts.
func patchTopologyManagedFields(ctx context.Context, oldManagedFields []metav1.ManagedFieldsEntry, obj *unstructured.Unstructured, cTo client.Client) error {
var containsTopologyManagedFields bool
// Check if the object was owned by the topology controller.
for _, m := range oldManagedFields {
if m.Operation == metav1.ManagedFieldsOperationApply &&
m.Manager == structuredmerge.TopologyManagerName &&
m.Subresource == "" {
containsTopologyManagedFields = true
break
}
}
// Return early if the object was not owned by the topology controller.
if !containsTopologyManagedFields {
return nil
}
base := obj.DeepCopy()
obj.SetManagedFields(oldManagedFields)

if err := cTo.Patch(ctx, obj, client.MergeFrom(base)); err != nil {
return errors.Wrapf(err, "error patching managed fields %q %s/%s",
obj.GroupVersionKind(), obj.GetNamespace(), obj.GetName())
}
return nil
}
97 changes: 83 additions & 14 deletions docs/book/src/developer/providers/v1.1-to-v1.2.md
@@ -21,6 +21,7 @@ NOTE: compliance with minimum Kubernetes version is enforced both by clusterctl
**Note**: Only the most relevant dependencies are listed; `k8s.io/` and `ginkgo`/`gomega` dependencies
in ClusterAPI are kept in sync with the versions used by `sigs.k8s.io/controller-runtime`.

- sigs.k8s.io/controller-runtime: v0.11.x => v0.12.3
- sigs.k8s.io/controller-tools: v0.8.x => v0.9.x
- sigs.k8s.io/kind: v0.11.x => v0.14.x
- k8s.io/*: v0.23.x => v0.24.x (derived from controller-runtime)
@@ -55,20 +56,88 @@ in ClusterAPI are kept in sync with the versions used by `sigs.k8s.io/controller
[merge-strategy](https://kubernetes.io/docs/reference/using-api/server-side-apply/#merge-strategy).
NOTE: the change will cause a rollout on existing clusters created with ClusterClass

E.g. in CAPA

```go
// +optional
Subnets Subnets `json:"subnets,omitempty"`
```
Must be modified into:

```go
// +optional
// +listType=map
// +listMapKey=id
Subnets Subnets `json:"subnets,omitempty"`
```

- [Server Side Apply](https://kubernetes.io/docs/reference/using-api/server-side-apply/) implementation in ClusterClass and managed topologies
requires dry-running changes on templates. If infrastructure or bootstrap providers have implemented immutability checks
in their InfrastructureMachineTemplate or BootstrapConfigTemplate webhooks,
the following changes are required to prevent the dry run from returning errors.
The implementation requires `sigs.k8s.io/controller-runtime` version `>= v0.12.3`.

E.g. in CAPD the following changes should be applied to the DockerMachineTemplate webhook:

```diff
+ type DockerMachineTemplateWebhook struct{}

+ func (m *DockerMachineTemplateWebhook) SetupWebhookWithManager(mgr ctrl.Manager) error {
- func (m *DockerMachineTemplate) SetupWebhookWithManager(mgr ctrl.Manager) error {
return ctrl.NewWebhookManagedBy(mgr).
- For(m).
+ For(&DockerMachineTemplate{}).
+ WithValidator(m).
Complete()
}

// +kubebuilder:webhook:verbs=create;update,path=/validate-infrastructure-cluster-x-k8s-io-v1beta1-dockermachinetemplate,mutating=false,failurePolicy=fail,matchPolicy=Equivalent,groups=infrastructure.cluster.x-k8s.io,resources=dockermachinetemplates,versions=v1beta1,name=validation.dockermachinetemplate.infrastructure.cluster.x-k8s.io,sideEffects=None,admissionReviewVersions=v1;v1beta1

+ var _ webhook.CustomValidator = &DockerMachineTemplateWebhook{}
- var _ webhook.Validator = &DockerMachineTemplate{}

+ func (*DockerMachineTemplateWebhook) ValidateCreate(ctx context.Context, _ runtime.Object) error {
- func (m *DockerMachineTemplate) ValidateCreate() error {
...
}

+ func (*DockerMachineTemplateWebhook) ValidateUpdate(ctx context.Context, oldRaw runtime.Object, newRaw runtime.Object) error {
+ newObj, ok := newRaw.(*DockerMachineTemplate)
+ if !ok {
+ return apierrors.NewBadRequest(fmt.Sprintf("expected a DockerMachineTemplate but got a %T", newRaw))
+ }
- func (m *DockerMachineTemplate) ValidateUpdate(oldRaw runtime.Object) error {
oldObj, ok := oldRaw.(*DockerMachineTemplate)
if !ok {
return apierrors.NewBadRequest(fmt.Sprintf("expected a DockerMachineTemplate but got a %T", oldRaw))
}
+ req, err := admission.RequestFromContext(ctx)
+ if err != nil {
+ return apierrors.NewBadRequest(fmt.Sprintf("expected a admission.Request inside context: %v", err))
+ }
...
// Immutability check
+ if !topology.ShouldSkipImmutabilityChecks(req, newObj) &&
+ !reflect.DeepEqual(newObj.Spec.Template.Spec, oldObj.Spec.Template.Spec) {
- if !reflect.DeepEqual(m.Spec.Template.Spec, old.Spec.Template.Spec) {
allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "template", "spec"), m, dockerMachineTemplateImmutableMsg))
}
...
}

+ func (*DockerMachineTemplateWebhook) ValidateDelete(ctx context.Context, _ runtime.Object) error {
- func (m *DockerMachineTemplate) ValidateDelete() error {
...
}
```

NOTES:
- We are introducing a `DockerMachineTemplateWebhook` struct because we are going to use a controller-runtime
`CustomValidator`. This allows skipping the immutability check only when the topology controller is dry running,
while preserving the validation behaviour for all other cases (a minimal sketch of that skip logic is shown after these notes).
- By using `CustomValidators` it is possible to move webhooks to other packages, thus removing some of the controller-runtime
dependency from the API types. However, whether to do so is up to the provider implementers and is independent of this change.
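The `topology.ShouldSkipImmutabilityChecks(req, newObj)` call used above conceptually combines two conditions: the admission request must be a dry run, and the object must carry the `topology.cluster.x-k8s.io/dry-run` annotation set by the topology controller. A minimal, illustrative version of that check (an assumption about the upstream helper's behaviour, not its exact implementation) could look like this:

```go
// Illustrative sketch: the kind of check a provider webhook can use to decide when
// to skip immutability validation. This is an assumption about what the upstream
// helper does, not its exact implementation.
package example

import (
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// shouldSkipImmutabilityChecks returns true only when the admission request is a
// dry run and the object carries the topology dry-run annotation, i.e. when the
// topology controller is previewing a template change via server-side dry-run apply.
func shouldSkipImmutabilityChecks(req admission.Request, obj client.Object) bool {
	// Never skip for real writes: only dry-run requests are eligible.
	if req.DryRun == nil || !*req.DryRun {
		return false
	}
	if obj == nil {
		return false
	}
	// Only skip when the topology controller explicitly marked the object.
	_, hasAnnotation := obj.GetAnnotations()[clusterv1.TopologyDryRunAnnotation]
	return hasAnnotation
}
```

Because both conditions must hold, a regular (non-dry-run) update from a user still fails the immutability validation exactly as before.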

### Other

6 changes: 3 additions & 3 deletions internal/controllers/topology/cluster/cluster_controller.go
@@ -362,14 +362,14 @@ func (r *Reconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Clu

// serverSideApplyPatchHelperFactory makes use of managed fields provided by server side apply and is used by the controller.
func serverSideApplyPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
- return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
- return structuredmerge.NewServerSidePatchHelper(original, modified, c, opts...)
+ return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
+ return structuredmerge.NewServerSidePatchHelper(ctx, original, modified, c, opts...)
}
}

// dryRunPatchHelperFactory makes use of a two-ways patch and is used in situations where we cannot rely on managed fields.
func dryRunPatchHelperFactory(c client.Client) structuredmerge.PatchHelperFactoryFunc {
- return func(original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
+ return func(ctx context.Context, original, modified client.Object, opts ...structuredmerge.HelperOption) (structuredmerge.PatchHelper, error) {
return structuredmerge.NewTwoWaysPatchHelper(original, modified, c, opts...)
}
}
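Both factories return a `structuredmerge.PatchHelperFactoryFunc`, whose signature now takes a `context.Context` so that the server-side variant can issue its dry-run request. A rough sketch of how a caller might consume such a factory (illustrative only; the `HasChanges`/`Patch` method names are assumptions about the `PatchHelper` interface, and `structuredmerge` is an internal package usable only within the cluster-api module):

```go
// Illustrative sketch only: how a caller might consume a PatchHelperFactoryFunc.
package example

import (
	"context"

	"github.com/pkg/errors"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
)

// reconcileObject computes the patch from current to desired and applies it only
// when there is an actual change. With serverSideApplyPatchHelperFactory the
// comparison is backed by a server-side dry-run apply, which is why the context
// is now threaded through the factory.
func reconcileObject(ctx context.Context, factory structuredmerge.PatchHelperFactoryFunc, current, desired client.Object) error {
	// current may be nil when the object does not exist yet (create case).
	helper, err := factory(ctx, current, desired)
	if err != nil {
		return errors.Wrap(err, "failed to create patch helper")
	}
	if !helper.HasChanges() {
		// Current already matches desired; nothing to patch.
		return nil
	}
	return helper.Patch(ctx)
}
```

This mirrors the call sites updated in `reconcile_state.go` below, where the context is now passed through to every `patchHelperFactory` invocation.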
22 changes: 11 additions & 11 deletions internal/controllers/topology/cluster/reconcile_state.go
@@ -101,7 +101,7 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e
// Note: we are using Patch instead of create for ensuring consistency in managedFields for the entire controller
// but in this case it isn't strictly necessary given that we are not using server side apply for modifying the
// object afterwards.
- helper, err := r.patchHelperFactory(nil, shim)
+ helper, err := r.patchHelperFactory(ctx, nil, shim)
if err != nil {
return errors.Wrap(err, "failed to create the patch helper for the cluster shim object")
}
@@ -384,7 +384,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
// If a current MachineHealthCheck doesn't exist but there is a desired MachineHealthCheck attempt to create.
if current == nil && desired != nil {
log.Infof("Creating %s", tlog.KObj{Obj: desired})
- helper, err := r.patchHelperFactory(nil, desired)
+ helper, err := r.patchHelperFactory(ctx, nil, desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: desired})
}
@@ -413,7 +413,7 @@ func (r *Reconciler) reconcileMachineHealthCheck(ctx context.Context, current, d
// Check differences between current and desired MachineHealthChecks, and patch if required.
// NOTE: we want to be authoritative on the entire spec because the users are
// expected to change MHC fields from the ClusterClass only.
- patchHelper, err := r.patchHelperFactory(current, desired)
+ patchHelper, err := r.patchHelperFactory(ctx, current, desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: current})
}
@@ -438,7 +438,7 @@ func (r *Reconciler) reconcileCluster(ctx context.Context, s *scope.Scope) error
ctx, log := tlog.LoggerFrom(ctx).WithObject(s.Desired.Cluster).Into(ctx)

// Check differences between current and desired state, and eventually patch the current object.
- patchHelper, err := r.patchHelperFactory(s.Current.Cluster, s.Desired.Cluster)
+ patchHelper, err := r.patchHelperFactory(ctx, s.Current.Cluster, s.Desired.Cluster)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: s.Current.Cluster})
}
@@ -508,7 +508,7 @@ func (r *Reconciler) createMachineDeployment(ctx context.Context, cluster *clust

log = log.WithObject(md.Object)
log.Infof(fmt.Sprintf("Creating %s", tlog.KObj{Obj: md.Object}))
- helper, err := r.patchHelperFactory(nil, md.Object)
+ helper, err := r.patchHelperFactory(ctx, nil, md.Object)
if err != nil {
return createErrorWithoutObjectName(err, md.Object)
}
@@ -563,7 +563,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, cluster *clust

// Check differences between current and desired MachineDeployment, and eventually patch the current object.
log = log.WithObject(desiredMD.Object)
- patchHelper, err := r.patchHelperFactory(currentMD.Object, desiredMD.Object)
+ patchHelper, err := r.patchHelperFactory(ctx, currentMD.Object, desiredMD.Object)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: currentMD.Object})
}
@@ -656,7 +656,7 @@ func (r *Reconciler) reconcileReferencedObject(ctx context.Context, in reconcile
// If there is no current object, create it.
if in.current == nil {
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
- helper, err := r.patchHelperFactory(nil, in.desired)
+ helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
@@ -673,7 +673,7 @@ }
}

// Check differences between current and desired state, and eventually patch the current object.
- patchHelper, err := r.patchHelperFactory(in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
+ patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired, structuredmerge.IgnorePaths(in.ignorePaths))
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
}
@@ -733,7 +733,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci
// If there is no current object, create the desired object.
if in.current == nil {
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
- helper, err := r.patchHelperFactory(nil, in.desired)
+ helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
@@ -754,7 +754,7 @@ }
}

// Check differences between current and desired objects, and if there are changes eventually start the template rotation.
- patchHelper, err := r.patchHelperFactory(in.current, in.desired)
+ patchHelper, err := r.patchHelperFactory(ctx, in.current, in.desired)
if err != nil {
return errors.Wrapf(err, "failed to create patch helper for %s", tlog.KObj{Obj: in.current})
}
@@ -785,7 +785,7 @@ func (r *Reconciler) reconcileReferencedTemplate(ctx context.Context, in reconci

log.Infof("Rotating %s, new name %s", tlog.KObj{Obj: in.current}, newName)
log.Infof("Creating %s", tlog.KObj{Obj: in.desired})
- helper, err := r.patchHelperFactory(nil, in.desired)
+ helper, err := r.patchHelperFactory(ctx, nil, in.desired)
if err != nil {
return errors.Wrap(createErrorWithoutObjectName(err, in.desired), "failed to create patch helper")
}
