From 6b68d1c85ce298c4a048eccf3765ce77edc25ebb Mon Sep 17 00:00:00 2001
From: willie-yao
Date: Fri, 1 Sep 2023 22:06:12 +0000
Subject: [PATCH] Ownerref

---
 .../topology/cluster/desired_state.go         |  2 ++
 .../topology/cluster/desired_state_test.go    | 32 ++++++++++---------
 2 files changed, 19 insertions(+), 15 deletions(-)

diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go
index 0aed86b5c517..6b3bc280cc54 100644
--- a/internal/controllers/topology/cluster/desired_state.go
+++ b/internal/controllers/topology/cluster/desired_state.go
@@ -953,6 +953,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
 		cluster:               s.Current.Cluster,
 		namePrefix:            bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
 		currentObjectRef:      currentBootstrapConfigRef,
+		ownerRef:              ownerReferenceTo(s.Current.Cluster),
 	})
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name)
@@ -977,6 +978,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
 		cluster:               s.Current.Cluster,
 		namePrefix:            infrastructureMachinePoolNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
 		currentObjectRef:      currentInfraMachinePoolRef,
+		ownerRef:              ownerReferenceTo(s.Current.Cluster),
 	})
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to compute infrastructure object for topology %q", machinePoolTopology.Name)
diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/internal/controllers/topology/cluster/desired_state_test.go
index 1deffbc701f2..d2e2ecfccab3 100644
--- a/internal/controllers/topology/cluster/desired_state_test.go
+++ b/internal/controllers/topology/cluster/desired_state_test.go
@@ -29,10 +29,10 @@ import (
 	"k8s.io/apimachinery/pkg/util/intstr"
 	utilfeature "k8s.io/component-base/featuregate/testing"
 	"k8s.io/utils/pointer"
-	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
@@ -1725,7 +1725,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 }
 
 func TestComputeMachinePool(t *testing.T) {
-	workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-infraMachinePooltemplate").
+	workerInfrastructureMachinePoolTemplate := builder.InfrastructureMachinePoolTemplate(metav1.NamespaceDefault, "linux-worker-inframachinepooltemplate").
 		Build()
 	workerBootstrapTemplate := builder.BootstrapTemplate(metav1.NamespaceDefault, "linux-worker-bootstraptemplate").
 		Build()
@@ -1739,6 +1739,7 @@ func TestComputeMachinePool(t *testing.T) {
 		WithAnnotations(annotations).
 		WithInfrastructureTemplate(workerInfrastructureMachinePoolTemplate).
 		WithBootstrapTemplate(workerBootstrapTemplate).
+		WithFailureDomains("A", "B").
 		WithNodeDrainTimeout(&clusterClassDuration).
 		WithNodeVolumeDetachTimeout(&clusterClassDuration).
 		WithNodeDeletionTimeout(&clusterClassDuration).
@@ -1777,7 +1778,7 @@ func TestComputeMachinePool(t *testing.T) {
 	}
 
 	replicas := int32(5)
-	topologyFailureDomain := "B"
+	topologyFailureDomains := []string{"A", "B"}
 	topologyDuration := metav1.Duration{Duration: 10 * time.Second}
 	var topologyMinReadySeconds int32 = 10
 	mpTopology := clusterv1.MachinePoolTopology{
@@ -1797,6 +1798,7 @@ func TestComputeMachinePool(t *testing.T) {
 		Class:    "linux-worker",
 		Name:     "big-pool-of-machines",
 		Replicas: &replicas,
+		FailureDomains:          topologyFailureDomains,
 		NodeDrainTimeout:        &topologyDuration,
 		NodeVolumeDetachTimeout: &topologyDuration,
 		NodeDeletionTimeout:     &topologyDuration,
@@ -1828,13 +1830,13 @@ func TestComputeMachinePool(t *testing.T) {
 		actualMd := actual.Object
 		g.Expect(*actualMd.Spec.Replicas).To(Equal(replicas))
 		g.Expect(*actualMd.Spec.MinReadySeconds).To(Equal(topologyMinReadySeconds))
-		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain))
 		g.Expect(*actualMd.Spec.Template.Spec.NodeDrainTimeout).To(Equal(topologyDuration))
 		g.Expect(*actualMd.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(topologyDuration))
 		g.Expect(*actualMd.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(topologyDuration))
 		g.Expect(actualMd.Spec.ClusterName).To(Equal("cluster1"))
 		g.Expect(actualMd.Name).To(ContainSubstring("cluster1"))
 		g.Expect(actualMd.Name).To(ContainSubstring("big-pool-of-machines"))
+		g.Expect(actualMd.Spec.FailureDomains).To(Equal(topologyFailureDomains))
 
 		expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations)
 		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
@@ -1854,7 +1856,7 @@ func TestComputeMachinePool(t *testing.T) {
 		})))
 		g.Expect(actualMd.Spec.Template.Spec.InfrastructureRef.Name).ToNot(Equal("linux-worker-inframachinetemplate"))
-		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-BootstrapObject"))
+		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).ToNot(Equal("linux-worker-bootstraptemplate"))
 	})
 
 	t.Run("Generates the machine pool and the referenced templates using ClusterClass defaults", func(t *testing.T) {
 		g := NewWithT(t)
@@ -1916,31 +1918,31 @@ func TestComputeMachinePool(t *testing.T) {
 		actual, err := computeMachinePool(ctx, s, mpTopology)
 		g.Expect(err).ToNot(HaveOccurred())
 
-		actualMd := actual.Object
+		actualMp := actual.Object
 
-		g.Expect(*actualMd.Spec.Replicas).NotTo(Equal(currentReplicas))
-		g.Expect(*actualMd.Spec.Template.Spec.FailureDomain).To(Equal(topologyFailureDomain))
-		g.Expect(actualMd.Name).To(Equal("existing-Pool-1"))
+		g.Expect(*actualMp.Spec.Replicas).NotTo(Equal(currentReplicas))
+		g.Expect(actualMp.Spec.FailureDomains).To(Equal(topologyFailureDomains))
+		g.Expect(actualMp.Name).To(Equal("existing-pool-1"))
 
 		expectedAnnotations := util.MergeMap(mpTopology.Metadata.Annotations, mp1.Template.Metadata.Annotations)
 		delete(expectedAnnotations, clusterv1.ClusterTopologyHoldUpgradeSequenceAnnotation)
 		delete(expectedAnnotations, clusterv1.ClusterTopologyDeferUpgradeAnnotation)
-		g.Expect(actualMd.Annotations).To(Equal(expectedAnnotations))
-		g.Expect(actualMd.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
+		g.Expect(actualMp.Annotations).To(Equal(expectedAnnotations))
+		g.Expect(actualMp.Spec.Template.ObjectMeta.Annotations).To(Equal(expectedAnnotations))
 
-		g.Expect(actualMd.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
+		g.Expect(actualMp.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
 			clusterv1.ClusterNameLabel:                    cluster.Name,
 			clusterv1.ClusterTopologyOwnedLabel:           "",
 			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
 		})))
-		g.Expect(actualMd.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
+		g.Expect(actualMp.Spec.Template.ObjectMeta.Labels).To(BeComparableTo(util.MergeMap(mpTopology.Metadata.Labels, mp1.Template.Metadata.Labels, map[string]string{
 			clusterv1.ClusterNameLabel:                    cluster.Name,
 			clusterv1.ClusterTopologyOwnedLabel:           "",
 			clusterv1.ClusterTopologyMachinePoolNameLabel: "big-pool-of-machines",
 		})))
 
-		g.Expect(actualMd.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinetemplate"))
-		g.Expect(actualMd.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-BootstrapObject"))
+		g.Expect(actualMp.Spec.Template.Spec.InfrastructureRef.Name).To(Equal("linux-worker-inframachinepooltemplate"))
+		g.Expect(actualMp.Spec.Template.Spec.Bootstrap.ConfigRef.Name).To(Equal("linux-worker-bootstraptemplate"))
 	})
 
 	t.Run("If a machine pool references a topology class that does not exist, machine pool generation fails", func(t *testing.T) {