Skip to content

Commit

Permalink
update
Browse files Browse the repository at this point in the history
  • Loading branch information
sbueringer committed Feb 20, 2023
1 parent c01c786 commit 6d86938
Show file tree
Hide file tree
Showing 4 changed files with 210 additions and 44 deletions.
135 changes: 111 additions & 24 deletions internal/controllers/topology/cluster/structuredmerge/dryrun.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (

jsonpatch "github.com/evanphx/json-patch/v5"
"github.com/pkg/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/client"

Expand All @@ -44,6 +45,13 @@ type dryRunSSAPatchInput struct {

// dryRunSSAPatch uses server side apply dry run to determine if the operation is going to change the actual object.
func dryRunSSAPatch(ctx context.Context, dryRunCtx *dryRunSSAPatchInput) (bool, bool, error) {
// For dry run we use the same options as for the intent, but additionally allow
// metadata.managedFields to ensure that changes to field ownership are detected.
dryRunHelperOptions := &HelperOptions{
allowedPaths: append(dryRunCtx.helperOptions.allowedPaths, []string{"metadata", "managedFields"}),
ignorePaths: dryRunCtx.helperOptions.ignorePaths,
}

// Add TopologyDryRunAnnotation to notify validation webhooks to skip immutability checks.
if err := unstructured.SetNestedField(dryRunCtx.originalUnstructured.Object, "", "metadata", "annotations", clusterv1.TopologyDryRunAnnotation); err != nil {
return false, false, errors.Wrap(err, "failed to add topology dry-run annotation to original object")
Expand All @@ -52,34 +60,67 @@ func dryRunSSAPatch(ctx context.Context, dryRunCtx *dryRunSSAPatchInput) (bool,
return false, false, errors.Wrap(err, "failed to add topology dry-run annotation to modified object")
}

// Do a server-side apply dry-run with modifiedUnstructured to get the updated object.
err := dryRunCtx.client.Patch(ctx, dryRunCtx.modifiedUnstructured, client.Apply, client.DryRunAll, client.FieldOwner(TopologyManagerName), client.ForceOwnership)
if err != nil {
// This catches errors like metadata.uid changes.
return false, false, errors.Wrap(err, "server side apply dry-run failed for modified object")
}

// Do a server-side apply dry-run with originalUnstructured to ensure the latest defaulting is applied.
// Note: After a Cluster API upgrade there is no guarantee that defaulting has been run on existing objects.
// We have to ensure defaulting has been applied before comparing original with modified, because otherwise
// differences in defaulting would trigger rollouts.
err := dryRunCtx.client.Patch(ctx, dryRunCtx.originalUnstructured, client.Apply, client.DryRunAll, client.FieldOwner(TopologyManagerName), client.ForceOwnership)
// Note: We cannot use the managed fields of originalUnstructured after SSA dryrun, because applying
// the whole originalUnstructured will give capi-topology ownership of all fields. Thus, we back up the
// managed fields and restore them after the dry run.
// It's fine to compare the managed fields of modifiedUnstructured after dry-run with originalUnstructured
// before dry-run as we want to know if applying modifiedUnstructured would change managed fields on original.

// Filter object to drop fields which are not part of our intent.
// Note: It's especially important to also drop metadata.resourceVersion, otherwise we could get the following
// error: "the object has been modified; please apply your changes to the latest version and try again"
filterObject(dryRunCtx.originalUnstructured, dryRunHelperOptions)
// Backup managed fields.
originalUnstructuredManagedFieldsBeforeSSA := dryRunCtx.originalUnstructured.GetManagedFields()
// Set managed fields to nil.
// Note: Otherwise we would get the following error:
// "failed to request dry-run server side apply: metadata.managedFields must be nil"
dryRunCtx.originalUnstructured.SetManagedFields(nil)
err = dryRunCtx.client.Patch(ctx, dryRunCtx.originalUnstructured, client.Apply, client.DryRunAll, client.FieldOwner(TopologyManagerName), client.ForceOwnership)
if err != nil {
// This catches errors like metadata.uid changes.
return false, false, errors.Wrap(err, "server side apply dry-run failed for original object")
}
// Do a server-side apply dry-run with modifiedUnstructured to get the updated object.
err = dryRunCtx.client.Patch(ctx, dryRunCtx.modifiedUnstructured, client.Apply, client.DryRunAll, client.FieldOwner(TopologyManagerName), client.ForceOwnership)
if err != nil {
// This catches errors like metadata.uid changes.
return false, false, errors.Wrap(err, "server side apply dry-run failed for modified object")
// Restore managed fields.
dryRunCtx.originalUnstructured.SetManagedFields(originalUnstructuredManagedFieldsBeforeSSA)

// Cleanup the modifiedUnstructured object to remove the added TopologyDryRunAnnotation
// and remove the affected managedFields for `manager=capi-topology` which would
// otherwise show the additional field ownership for the annotation we added and
// the changed managedField timestamp.
// We also drop managedFields of other managers as we don't care about if other managers
// made changes to the object. We only want to trigger a ServerSideApply if we would make
// changes to the object.
// Please note that if other managers made changes to fields that we care about and thus ownership changed,
// this would affect our managed fields as well and we would still detect it by diffing our managed fields.
if err := cleanupManagedFieldsAndAnnotation(dryRunCtx.modifiedUnstructured); err != nil {
return false, false, errors.Wrap(err, "failed to filter topology dry-run annotation on modified object")
}

// Cleanup the originalUnstructured and modifiedUnstructured objects to remove the added TopologyDryRunAnnotation
// and remove all managedFields.
// We can't compare managedFields as running dry-run with originalUnstructured will give us ownership of
// all fields in originalUnstructured, while modifiedUnstructured only has the fields of our intent set.
// We don't have to trigger a rollout for managed fields as they only reflect ownership of fields, as soon
// as we actually want to change field values we will do a rollout anyway.
cleanupManagedFieldsAndAnnotations(dryRunCtx.originalUnstructured)
cleanupManagedFieldsAndAnnotations(dryRunCtx.modifiedUnstructured)
// Also run the function for the originalUnstructured to remove the managedField
// timestamp for `manager=capi-topology`.
// We also drop managedFields of other managers as we don't care about if other managers
// made changes to the object. We only want to trigger a ServerSideApply if we would make
// changes to the object.
// Please note that if other managers made changes to fields that we care about and thus ownership changed,
// this would affect our managed fields as well and we would still detect it by diffing our managed fields.
if err := cleanupManagedFieldsAndAnnotation(dryRunCtx.originalUnstructured); err != nil {
return false, false, errors.Wrap(err, "failed to filter topology dry-run annotation on original object")
}

// Drop all fields which are not part of our intent.
filterObject(dryRunCtx.originalUnstructured, dryRunCtx.helperOptions)
filterObject(dryRunCtx.modifiedUnstructured, dryRunCtx.helperOptions)
// Drop the other fields which are not part of our intent.
filterObject(dryRunCtx.modifiedUnstructured, dryRunHelperOptions)
filterObject(dryRunCtx.originalUnstructured, dryRunHelperOptions)

// Compare the output of dry run to the original object.
originalJSON, err := json.Marshal(dryRunCtx.originalUnstructured)
Expand Down Expand Up @@ -108,10 +149,12 @@ func dryRunSSAPatch(ctx context.Context, dryRunCtx *dryRunSSAPatchInput) (bool,
return hasChanges, hasSpecChanges, nil
}

// cleanupManagedFieldsAndAnnotations adjusts the obj to remove the topology.cluster.x-k8s.io/dry-run
// and cluster.x-k8s.io/conversion-data annotations as well as all managed fields.
func cleanupManagedFieldsAndAnnotations(obj *unstructured.Unstructured) {
// Filter the annotations as well as leftover empty maps.
// cleanupManagedFieldsAndAnnotation adjusts the obj to remove the topology.cluster.x-k8s.io/dry-run
// and cluster.x-k8s.io/conversion-data annotations as well as the field ownership reference in managedFields. It does
// also remove the timestamp of the managedField for `manager=capi-topology` because
// it is expected to change due to the additional annotation.
func cleanupManagedFieldsAndAnnotation(obj *unstructured.Unstructured) error {
// Filter the topology.cluster.x-k8s.io/dry-run annotation as well as leftover empty maps.
filterIntent(&filterIntentInput{
path: contract.Path{},
value: obj.Object,
Expand All @@ -124,6 +167,50 @@ func cleanupManagedFieldsAndAnnotations(obj *unstructured.Unstructured) {
}),
})

// Drop managed fields.
obj.SetManagedFields(nil)
// Adjust the managed field for Manager=TopologyManagerName, Subresource="", Operation="Apply" and
// drop managed fields of other controllers.
oldManagedFields := obj.GetManagedFields()
newManagedFields := []metav1.ManagedFieldsEntry{}
for _, managedField := range oldManagedFields {
if managedField.Manager != TopologyManagerName {
continue
}
if managedField.Subresource != "" {
continue
}
if managedField.Operation != metav1.ManagedFieldsOperationApply {
continue
}

// Unset the managedField timestamp because managedFields are treated as atomic map.
managedField.Time = nil

// Unmarshal the managed fields into a map[string]interface{}
fieldsV1 := map[string]interface{}{}
if err := json.Unmarshal(managedField.FieldsV1.Raw, &fieldsV1); err != nil {
return errors.Wrap(err, "failed to unmarshal managed fields")
}

// Filter out the annotation ownership as well as leftover empty maps.
filterIntent(&filterIntentInput{
path: contract.Path{},
value: fieldsV1,
shouldFilter: isIgnorePath([]contract.Path{
{"f:metadata", "f:annotations", "f:" + clusterv1.TopologyDryRunAnnotation},
{"f:metadata", "f:annotations", "f:" + conversion.DataAnnotation},
}),
})

fieldsV1Raw, err := json.Marshal(fieldsV1)
if err != nil {
return errors.Wrap(err, "failed to marshal managed fields")
}
managedField.FieldsV1.Raw = fieldsV1Raw

newManagedFields = append(newManagedFields, managedField)
}

obj.SetManagedFields(newManagedFields)

return nil
}
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@ package structuredmerge

import (
"testing"
"time"

. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand All @@ -28,6 +29,10 @@ import (
)

func Test_cleanupManagedFieldsAndAnnotation(t *testing.T) {
rawManagedFieldWithAnnotation := `{"f:metadata":{"f:annotations":{"f:topology.cluster.x-k8s.io/dry-run":{},"f:cluster.x-k8s.io/conversion-data":{}}}}`
rawManagedFieldWithAnnotationSpecLabels := `{"f:metadata":{"f:annotations":{"f:topology.cluster.x-k8s.io/dry-run":{},"f:cluster.x-k8s.io/conversion-data":{}},"f:labels":{}},"f:spec":{"f:foo":{}}}`
rawManagedFieldWithSpecLabels := `{"f:metadata":{"f:labels":{}},"f:spec":{"f:foo":{}}}`

tests := []struct {
name string
obj *unstructured.Unstructured
Expand All @@ -38,15 +43,15 @@ func Test_cleanupManagedFieldsAndAnnotation(t *testing.T) {
name: "no-op",
obj: newObjectBuilder().Build(),
wantErr: false,
want: newObjectBuilder().Build(),
},
{
name: "filter out dry-run annotation",
obj: newObjectBuilder().
WithAnnotation(clusterv1.TopologyDryRunAnnotation, "").
Build(),
wantErr: false,
want: newObjectBuilder().Build(),
want: newObjectBuilder().
Build(),
},
{
name: "filter out conversion annotation",
Expand All @@ -58,20 +63,73 @@ func Test_cleanupManagedFieldsAndAnnotation(t *testing.T) {
Build(),
},
{
name: "remove managed field",
name: "managedFields: should drop managed fields of other manager",
obj: newObjectBuilder().
WithManagedFieldsEntry("other", "", metav1.ManagedFieldsOperationApply, []byte(`{}`), nil).
Build(),
wantErr: false,
want: newObjectBuilder().
Build(),
},
{
name: "managedFields: should drop managed fields of a subresource",
obj: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "status", metav1.ManagedFieldsOperationApply, []byte(`{}`), nil).
Build(),
wantErr: false,
want: newObjectBuilder().
Build(),
},
{
name: "managedFields: should drop managed fields of another operation",
obj: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationUpdate, []byte(`{}`), nil).
Build(),
wantErr: false,
want: newObjectBuilder().
Build(),
},
{
name: "managedFields: cleanup up the managed field entry",
obj: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationApply, []byte(rawManagedFieldWithAnnotation), nil).
Build(),
wantErr: false,
want: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationApply, []byte(`{}`), nil).
Build(),
},
{
name: "managedFields: cleanup the managed field entry and preserve other ownership",
obj: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationApply, []byte(rawManagedFieldWithAnnotationSpecLabels), nil).
Build(),
wantErr: false,
want: newObjectBuilder().Build(),
want: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationApply, []byte(rawManagedFieldWithSpecLabels), nil).
Build(),
},
{
name: "managedFields: remove time",
obj: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationApply, []byte(`{}`), &metav1.Time{Time: time.Time{}}).
Build(),
wantErr: false,
want: newObjectBuilder().
WithManagedFieldsEntry(TopologyManagerName, "", metav1.ManagedFieldsOperationApply, []byte(`{}`), nil).
Build(),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
g := NewWithT(t)

cleanupManagedFieldsAndAnnotations(tt.obj)
g.Expect(tt.obj).To(BeEquivalentTo(tt.want))
if err := cleanupManagedFieldsAndAnnotation(tt.obj); (err != nil) != tt.wantErr {
t.Errorf("cleanupManagedFieldsAndAnnotation() error = %v, wantErr %v", err, tt.wantErr)
}
if tt.want != nil {
g.Expect(tt.obj).To(BeEquivalentTo(tt.want))
}
})
}
}
Expand All @@ -95,6 +153,12 @@ func (b objectBuilder) DeepCopy() objectBuilder {
}

func (b objectBuilder) Build() *unstructured.Unstructured {
// Setting an empty managed field array if no managed field is set so there is
// no difference between an object which never had a managed field and one from
// which all managed field entries were removed.
if b.u.GetManagedFields() == nil {
b.u.SetManagedFields([]metav1.ManagedFieldsEntry{})
}
return b.u.DeepCopy()
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -68,14 +68,9 @@ func NewServerSidePatchHelper(ctx context.Context, original, modified client.Obj
}
}

// Filter the modifiedUnstructured and originalUnstructured objects to only contain fields that we intend to manage.
// Filter the modifiedUnstructured object to only contain changes intended to be done.
// The originalUnstructured object will be filtered in dryRunSSAPatch using other options.
filterObject(modifiedUnstructured, helperOptions)
if originalUnstructured != nil {
// Note: It's important that we also drop metadata.managedFields from originalUnstructured.
// We will run SSA dry-run on originalUnstructured and with metadata.managedFields we would get
// this error: "failed to request dry-run server side apply: metadata.managedFields must be nil"
filterObject(originalUnstructured, helperOptions)
}

// Carry over uid to match the intent to:
// * create (uid==""):
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -267,7 +267,7 @@ func TestServerSideApply(t *testing.T) {
original := obj.DeepCopy()
g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(original), original)).To(Succeed())

// Create a patch helper for a modified object with no changes to what previously applied by the topology manager.
// Create a patch helper for a modified object with no changes to what was previously applied by the topology manager.
modified := obj.DeepCopy()

p0, err := NewServerSidePatchHelper(ctx, original, modified, env.GetClient(), IgnorePaths{{"spec", "foo"}})
Expand Down Expand Up @@ -313,7 +313,7 @@ func TestServerSideApply(t *testing.T) {
original := obj.DeepCopy()
g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(original), original)).To(Succeed())

// Create a patch helper for a modified object with some changes to what previously applied by the topology manager.
// Create a patch helper for a modified object with some changes to what was previously applied by the topology manager.
modified := obj.DeepCopy()
g.Expect(unstructured.SetNestedField(modified.Object, "changed", "spec", "controlPlaneEndpoint", "host")).To(Succeed())

Expand Down Expand Up @@ -350,18 +350,38 @@ func TestServerSideApply(t *testing.T) {
original := obj.DeepCopy()
g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(original), original)).To(Succeed())

// Create a patch helper for a modified object with no changed values to what was previously applied by the topology manager.
// .spec.controlPlaneEndpoint.host is already "changed" and owned by "capi-topology"
// .spec.bar is already "changed" but owned by "___TestServerSideApply_in_sigs_k8s_io_cluster_api_..."
// Create a patch helper for a modified object with some changes to what was previously applied by the topology manager.
modified := obj.DeepCopy()
g.Expect(unstructured.SetNestedField(modified.Object, "changed", "spec", "controlPlaneEndpoint", "host")).To(Succeed())
g.Expect(unstructured.SetNestedField(modified.Object, "changed", "spec", "bar")).To(Succeed())

// There should be no changes as the only change would be to managed fields and we don't patch in this case.
p0, err := NewServerSidePatchHelper(ctx, original, modified, env.GetClient(), IgnorePaths{{"spec", "foo"}})
g.Expect(err).ToNot(HaveOccurred())
g.Expect(p0.HasChanges()).To(BeFalse())
g.Expect(p0.HasChanges()).To(BeTrue())
g.Expect(p0.HasSpecChanges()).To(BeFalse())

// Create the object using server side apply
g.Expect(p0.Patch(ctx)).To(Succeed())

// Check the object and verify the change is applied as well as managed field updated accordingly.
got := obj.DeepCopy()
g.Expect(env.GetAPIReader().Get(ctx, client.ObjectKeyFromObject(got), got)).To(Succeed())

// Check if resourceVersion did change
g.Expect(got.GetResourceVersion()).ToNot(Equal(original.GetResourceVersion()))

v2, _, _ := unstructured.NestedString(got.Object, "spec", "bar")
g.Expect(v2).To(Equal("changed"))

fieldV1 := getTopologyManagedFields(got)
g.Expect(fieldV1).ToNot(BeEmpty())
g.Expect(fieldV1).To(HaveKey("f:spec")) // topology controller should express opinions on spec.

specFieldV1 := fieldV1["f:spec"].(map[string]interface{})
g.Expect(specFieldV1).ToNot(BeEmpty())
g.Expect(specFieldV1).To(HaveKey("f:controlPlaneEndpoint")) // topology controller should express opinions on spec.controlPlaneEndpoint.
g.Expect(specFieldV1).ToNot(HaveKey("f:foo")) // topology controller should not express opinions on ignore paths.
g.Expect(specFieldV1).To(HaveKey("f:bar")) // topology controller now has an opinion on a field previously managed by other controllers (force ownership).
})
t.Run("Topology controller reconcile again with an opinion on a field managed by another controller (force ownership)", func(t *testing.T) {
g := NewWithT(t)
Expand Down

0 comments on commit 6d86938

Please sign in to comment.