diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index 514383e8cae3..4b3b592162e0 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -208,19 +208,18 @@ func ClusterUpgradeConformanceSpec(ctx context.Context, inputGetter func() Clust
 				MachineDeployments:          clusterResources.MachineDeployments,
 				WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
 			})
-		}
-	}
 
-	// Only attempt to upgrade MachinePools if they were provided in the template.
-	if len(clusterResources.MachinePools) > 0 && workerMachineCount > 0 {
-		By("Upgrading the machinepool instances")
-		framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
-			ClusterProxy:                   input.BootstrapClusterProxy,
-			Cluster:                        clusterResources.Cluster,
-			UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
-			WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
-			MachinePools:                   clusterResources.MachinePools,
-		})
+			if len(clusterResources.MachinePools) > 0 {
+				By("Upgrading the machinepool instances")
+				framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
+					ClusterProxy:                   input.BootstrapClusterProxy,
+					Cluster:                        clusterResources.Cluster,
+					UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
+					WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
+					MachinePools:                   clusterResources.MachinePools,
+				})
+			}
+		}
 	}
 
 	By("Waiting until nodes are ready")
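The relocated block above also drops the standalone `workerMachineCount > 0` guard: the MachinePool upgrade now runs inside the worker-upgrade branch, which is only entered when worker machines were requested, so the extra condition is implied by the enclosing block. A minimal, self-contained sketch of the resulting gating (variable and function names here are illustrative stand-ins, not the spec's actual identifiers):

    package main

    import "fmt"

    // upgradeWorkers sketches the new gating: MachinePools are upgraded only
    // inside the branch that already requires worker machines, making a
    // separate "workerMachineCount > 0" check for MachinePools redundant.
    func upgradeWorkers(workerMachineCount int64, machineDeployments, machinePools int) {
    	if machineDeployments > 0 && workerMachineCount > 0 {
    		fmt.Println("upgrading MachineDeployments") // framework.UpgradeMachineDeploymentsAndWait
    		if machinePools > 0 {
    			fmt.Println("upgrading MachinePools") // framework.UpgradeMachinePoolAndWait
    		}
    	}
    }

    func main() {
    	upgradeWorkers(1, 1, 1)
    }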
diff --git a/test/e2e/cluster_upgrade_runtimesdk.go b/test/e2e/cluster_upgrade_runtimesdk.go
index 56b53bd9728e..e2d62e5d4d53 100644
--- a/test/e2e/cluster_upgrade_runtimesdk.go
+++ b/test/e2e/cluster_upgrade_runtimesdk.go
@@ -247,18 +247,6 @@ func clusterUpgradeWithRuntimeSDKSpec(ctx context.Context, inputGetter func() cl
 		},
 	})
 
-	// Only attempt to upgrade MachinePools if they were provided in the template.
-	if len(clusterResources.MachinePools) > 0 && workerMachineCount > 0 {
-		By("Upgrading the machinepool instances")
-		framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
-			ClusterProxy:                   input.BootstrapClusterProxy,
-			Cluster:                        clusterResources.Cluster,
-			UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
-			WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
-			MachinePools:                   clusterResources.MachinePools,
-		})
-	}
-
 	By("Waiting until nodes are ready")
 	workloadProxy := input.BootstrapClusterProxy.GetWorkloadCluster(ctx, namespace.Name, clusterResources.Cluster.Name)
 	workloadClient := workloadProxy.GetClient()
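The block deleted above is no longer needed because this spec upgrades the Cluster through its managed topology, and the topology controller rolls the MachinePools along with the rest of the Cluster. If extra verification were ever wanted, a helper along these lines could list a cluster's MachinePools and collect the versions they converged to (an illustrative sketch only; `listMachinePoolVersions` is not part of this change):

    package e2e

    import (
    	"context"
    	"fmt"

    	"sigs.k8s.io/controller-runtime/pkg/client"

    	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    )

    // listMachinePoolVersions lists the MachinePools belonging to a cluster
    // (selected via the cluster-name label) and returns their template versions.
    func listMachinePoolVersions(ctx context.Context, c client.Client, namespace, clusterName string) ([]string, error) {
    	mpList := &expv1.MachinePoolList{}
    	if err := c.List(ctx, mpList,
    		client.InNamespace(namespace),
    		client.MatchingLabels{clusterv1.ClusterNameLabel: clusterName},
    	); err != nil {
    		return nil, fmt.Errorf("failed to list MachinePools: %w", err)
    	}
    	versions := make([]string, 0, len(mpList.Items))
    	for _, mp := range mpList.Items {
    		if mp.Spec.Template.Spec.Version != nil {
    			versions = append(versions, *mp.Spec.Template.Spec.Version)
    		}
    	}
    	return versions, nil
    }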
diff --git a/test/e2e/clusterclass_changes.go b/test/e2e/clusterclass_changes.go
index d9a309a3c3f1..3f2e9cd5e0d0 100644
--- a/test/e2e/clusterclass_changes.go
+++ b/test/e2e/clusterclass_changes.go
@@ -34,6 +34,7 @@ import (
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/test/e2e/internal/log"
 	"sigs.k8s.io/cluster-api/test/framework"
@@ -426,6 +427,131 @@ func modifyMachineDeploymentViaClusterClassAndWait(ctx context.Context, input mo
 	}
 }
 
+// modifyMachinePoolViaClusterClassAndWaitInput is the input type for modifyMachinePoolViaClusterClassAndWait.
+type modifyMachinePoolViaClusterClassAndWaitInput struct {
+	ClusterProxy                                  framework.ClusterProxy
+	ClusterClass                                  *clusterv1.ClusterClass
+	Cluster                                       *clusterv1.Cluster
+	ModifyBootstrapConfigTemplateFields           map[string]interface{}
+	ModifyInfrastructureMachinePoolTemplateFields map[string]interface{}
+	WaitForMachinePools                           []interface{}
+}
+
+// modifyMachinePoolViaClusterClassAndWait modifies the BootstrapConfigTemplate of MachinePoolClasses of a ClusterClass
+// by setting ModifyBootstrapConfigTemplateFields and waits until the changes are rolled out to the MachinePools of the Cluster.
+// NOTE: This helper is really specific to this test, so we are keeping this private vs. adding it to the framework.
+func modifyMachinePoolViaClusterClassAndWait(ctx context.Context, input modifyMachinePoolViaClusterClassAndWaitInput) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for modifyMachinePoolViaClusterClassAndWait")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling modifyMachinePoolViaClusterClassAndWait")
+	Expect(input.ClusterClass).ToNot(BeNil(), "Invalid argument. input.ClusterClass can't be nil when calling modifyMachinePoolViaClusterClassAndWait")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling modifyMachinePoolViaClusterClassAndWait")
+
+	mgmtClient := input.ClusterProxy.GetClient()
+
+	for _, mpClass := range input.ClusterClass.Spec.Workers.MachinePools {
+		// Only try to modify the BootstrapConfigTemplate if the MachinePoolClass is using a BootstrapConfigTemplate.
+		var bootstrapConfigTemplateRef *corev1.ObjectReference
+		var newBootstrapConfigTemplateName string
+		if mpClass.Template.Bootstrap.Ref != nil {
+			log.Logf("Modifying the BootstrapConfigTemplate of MachinePoolClass %q of ClusterClass %s", mpClass.Class, klog.KObj(input.ClusterClass))
+
+			// Retrieve BootstrapConfigTemplate object.
+			bootstrapConfigTemplateRef = mpClass.Template.Bootstrap.Ref
+			bootstrapConfigTemplate, err := external.Get(ctx, mgmtClient, bootstrapConfigTemplateRef, input.Cluster.Namespace)
+			Expect(err).ToNot(HaveOccurred())
+			// Create a new BootstrapConfigTemplate object with a new name and ModifyBootstrapConfigTemplateFields set.
+			newBootstrapConfigTemplate := bootstrapConfigTemplate.DeepCopy()
+			newBootstrapConfigTemplateName = fmt.Sprintf("%s-%s", bootstrapConfigTemplateRef.Name, util.RandomString(6))
+			newBootstrapConfigTemplate.SetName(newBootstrapConfigTemplateName)
+			newBootstrapConfigTemplate.SetResourceVersion("")
+			for fieldPath, value := range input.ModifyBootstrapConfigTemplateFields {
+				Expect(unstructured.SetNestedField(newBootstrapConfigTemplate.Object, value, strings.Split(fieldPath, ".")...)).To(Succeed())
+			}
+			Expect(mgmtClient.Create(ctx, newBootstrapConfigTemplate)).To(Succeed())
+		}
+
+		log.Logf("Modifying the InfrastructureMachinePoolTemplate of MachinePoolClass %q of ClusterClass %s", mpClass.Class, klog.KObj(input.ClusterClass))
+
+		// Retrieve InfrastructureMachinePoolTemplate object.
+		infrastructureMachinePoolTemplateRef := mpClass.Template.Infrastructure.Ref
+		infrastructureMachinePoolTemplate, err := external.Get(ctx, mgmtClient, infrastructureMachinePoolTemplateRef, input.Cluster.Namespace)
+		Expect(err).ToNot(HaveOccurred())
+		// Create a new InfrastructureMachinePoolTemplate object with a new name and ModifyInfrastructureMachinePoolTemplateFields set.
+		newInfrastructureMachinePoolTemplate := infrastructureMachinePoolTemplate.DeepCopy()
+		newInfrastructureMachinePoolTemplateName := fmt.Sprintf("%s-%s", infrastructureMachinePoolTemplateRef.Name, util.RandomString(6))
+		newInfrastructureMachinePoolTemplate.SetName(newInfrastructureMachinePoolTemplateName)
+		newInfrastructureMachinePoolTemplate.SetResourceVersion("")
+		for fieldPath, value := range input.ModifyInfrastructureMachinePoolTemplateFields {
+			Expect(unstructured.SetNestedField(newInfrastructureMachinePoolTemplate.Object, value, strings.Split(fieldPath, ".")...)).To(Succeed())
+		}
+		Expect(mgmtClient.Create(ctx, newInfrastructureMachinePoolTemplate)).To(Succeed())
+
+		// Patch the refs of the MachinePoolClass to reference the new templates.
+		patchHelper, err := patch.NewHelper(input.ClusterClass, mgmtClient)
+		Expect(err).ToNot(HaveOccurred())
+		if mpClass.Template.Bootstrap.Ref != nil {
+			bootstrapConfigTemplateRef.Name = newBootstrapConfigTemplateName
+		}
+		infrastructureMachinePoolTemplateRef.Name = newInfrastructureMachinePoolTemplateName
+		Expect(patchHelper.Patch(ctx, input.ClusterClass)).To(Succeed())
+
+		log.Logf("Waiting for MachinePool rollout for MachinePoolClass %q to complete.", mpClass.Class)
+		for _, mpTopology := range input.Cluster.Spec.Topology.Workers.MachinePools {
+			// Continue if the MachinePoolTopology belongs to another MachinePoolClass.
+			if mpTopology.Class != mpClass.Class {
+				continue
+			}
+
+			// NOTE: We only wait until the change is rolled out to the MachinePool objects and not to the worker machines
+			// to speed up the test and focus the test on the ClusterClass feature.
+			log.Logf("Waiting for MachinePool rollout for MachinePoolTopology %q (class %q) to complete.", mpTopology.Name, mpTopology.Class)
+			Eventually(func(g Gomega) error {
+				// Get the MachinePool for the current MachinePoolTopology.
+				mpList := &expv1.MachinePoolList{}
+				g.Expect(mgmtClient.List(ctx, mpList, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels{
+					clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
+				})).To(Succeed())
+				g.Expect(mpList.Items).To(HaveLen(1), fmt.Sprintf("expected one MachinePool for topology %q, but got %d", mpTopology.Name, len(mpList.Items)))
+				mp := mpList.Items[0]
+
+				// Verify that the fields from the Cluster topology are set on the MachinePool.
+				assertMachinePoolTopologyFields(g, mp, mpTopology)
+
+				if mpClass.Template.Bootstrap.Ref != nil {
+					// Get the corresponding BootstrapConfig object.
+					bootstrapConfigObjectRef := mp.Spec.Template.Spec.Bootstrap.ConfigRef
+					bootstrapConfigObject, err := external.Get(ctx, mgmtClient, bootstrapConfigObjectRef, input.Cluster.Namespace)
+					g.Expect(err).ToNot(HaveOccurred())
+
+					// Verify that ModifyBootstrapConfigTemplateFields have been set and propagated to the BootstrapConfig.
+					for fieldPath, expectedValue := range input.ModifyBootstrapConfigTemplateFields {
+						// MachinePools have a BootstrapConfig, not a BootstrapConfigTemplate, so the fieldPath
+						// has to be converted before it can be looked up on the object.
+						fieldPath = strings.TrimPrefix(fieldPath, "spec.template.")
+						currentValue, ok, err := unstructured.NestedFieldNoCopy(bootstrapConfigObject.Object, strings.Split(fieldPath, ".")...)
+						g.Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("failed to get field %q", fieldPath))
+						g.Expect(ok).To(BeTrue(), fmt.Sprintf("failed to get field %q", fieldPath))
+						g.Expect(currentValue).To(Equal(expectedValue), fmt.Sprintf("field %q should be equal", fieldPath))
+					}
+				}
+
+				// Get the corresponding InfrastructureMachinePoolTemplate.
+				infrastructureMachinePoolTemplateRef := mp.Spec.Template.Spec.InfrastructureRef
+				infrastructureMachinePoolTemplate, err := external.Get(ctx, mgmtClient, &infrastructureMachinePoolTemplateRef, input.Cluster.Namespace)
+				g.Expect(err).ToNot(HaveOccurred())
+
+				// Verify that ModifyInfrastructureMachinePoolTemplateFields have been set.
+				for fieldPath, expectedValue := range input.ModifyInfrastructureMachinePoolTemplateFields {
+					currentValue, ok, err := unstructured.NestedFieldNoCopy(infrastructureMachinePoolTemplate.Object, strings.Split(fieldPath, ".")...)
+					g.Expect(err).ToNot(HaveOccurred(), fmt.Sprintf("failed to get field %q", fieldPath))
+					g.Expect(ok).To(BeTrue(), fmt.Sprintf("failed to get field %q", fieldPath))
+					g.Expect(currentValue).To(Equal(expectedValue), fmt.Sprintf("field %q should be equal", fieldPath))
+				}
+				return nil
+			}, input.WaitForMachinePools...).Should(BeNil())
+		}
+	}
+}
+
 // assertMachineDeploymentTopologyFields asserts that all fields set in the MachineDeploymentTopology have been set on the MachineDeployment.
 // Note: We intentionally focus on the fields set in the MachineDeploymentTopology and ignore the ones set through ClusterClass
 // as we want to validate that the fields of the MachineDeploymentTopology have been propagated correctly.
@@ -464,6 +590,40 @@ func assertMachineDeploymentTopologyFields(g Gomega, md clusterv1.MachineDeploym
 	}
 }
 
+// assertMachinePoolTopologyFields asserts that all fields set in the MachinePoolTopology have been set on the MachinePool.
+// Note: We intentionally focus on the fields set in the MachinePoolTopology and ignore the ones set through ClusterClass
+// as we want to validate that the fields of the MachinePoolTopology have been propagated correctly.
+func assertMachinePoolTopologyFields(g Gomega, mp expv1.MachinePool, mpTopology clusterv1.MachinePoolTopology) {
+	// Note: We only verify that all labels and annotations from the Cluster topology exist to keep it simple here.
+	// This is fully covered by the ClusterClass rollout test.
+	for k, v := range mpTopology.Metadata.Labels {
+		g.Expect(mp.Labels).To(HaveKeyWithValue(k, v))
+	}
+	for k, v := range mpTopology.Metadata.Annotations {
+		g.Expect(mp.Annotations).To(HaveKeyWithValue(k, v))
+	}
+
+	if mpTopology.NodeDrainTimeout != nil {
+		g.Expect(mp.Spec.Template.Spec.NodeDrainTimeout).To(Equal(mpTopology.NodeDrainTimeout))
+	}
+
+	if mpTopology.NodeDeletionTimeout != nil {
+		g.Expect(mp.Spec.Template.Spec.NodeDeletionTimeout).To(Equal(mpTopology.NodeDeletionTimeout))
+	}
+
+	if mpTopology.NodeVolumeDetachTimeout != nil {
+		g.Expect(mp.Spec.Template.Spec.NodeVolumeDetachTimeout).To(Equal(mpTopology.NodeVolumeDetachTimeout))
+	}
+
+	if mpTopology.MinReadySeconds != nil {
+		g.Expect(mp.Spec.MinReadySeconds).To(Equal(mpTopology.MinReadySeconds))
+	}
+
+	if mpTopology.FailureDomains != nil && mp.Spec.Template.Spec.FailureDomain != nil {
+		g.Expect(mpTopology.FailureDomains).To(ContainElement(*mp.Spec.Template.Spec.FailureDomain))
+	}
+}
+
 // rebaseClusterClassAndWaitInput is the input type for rebaseClusterClassAndWait.
 type rebaseClusterClassAndWaitInput struct {
 	ClusterProxy framework.ClusterProxy
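One subtlety in the helper added above: a MachinePool carries an inline BootstrapConfig rather than a BootstrapConfigTemplate, so a field path written against the template (e.g. `spec.template.spec.verbosity`) has to be rewritten to `spec.verbosity` before it can be resolved on the object. A standalone illustration of that conversion (the `verbosity` field is just an example value):

    package main

    import (
    	"fmt"
    	"strings"

    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
    	// Field path as written against the BootstrapConfigTemplate.
    	fieldPath := "spec.template.spec.verbosity"
    	// MachinePools reference a BootstrapConfig directly, so strip the
    	// template wrapper before resolving the path on the object.
    	objectPath := strings.TrimPrefix(fieldPath, "spec.template.")

    	bootstrapConfig := &unstructured.Unstructured{Object: map[string]interface{}{
    		"spec": map[string]interface{}{"verbosity": int64(4)},
    	}}
    	value, found, err := unstructured.NestedFieldNoCopy(bootstrapConfig.Object, strings.Split(objectPath, ".")...)
    	fmt.Println(value, found, err) // 4 true <nil>
    }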
diff --git a/test/e2e/clusterclass_rollout.go b/test/e2e/clusterclass_rollout.go
index 4746eb499188..4b897d553da5 100644
--- a/test/e2e/clusterclass_rollout.go
+++ b/test/e2e/clusterclass_rollout.go
@@ -40,6 +40,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	"sigs.k8s.io/cluster-api/controllers/external"
 	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
+	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/machineset"
 	"sigs.k8s.io/cluster-api/test/e2e/internal/log"
@@ -134,10 +135,11 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 			WaitForClusterIntervals:      input.E2EConfig.GetIntervals(specName, "wait-cluster"),
 			WaitForControlPlaneIntervals: input.E2EConfig.GetIntervals(specName, "wait-control-plane"),
 			WaitForMachineDeployments:    input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+			WaitForMachinePools:          input.E2EConfig.GetIntervals(specName, "wait-machine-pool-nodes"),
 		}, clusterResources)
 		assertClusterObjects(ctx, input.BootstrapClusterProxy, clusterResources.Cluster, clusterResources.ClusterClass)
 
-		By("Rolling out changes to control plane and MachineDeployments (in-place)")
+		By("Rolling out changes to control plane, MachineDeployments, and MachinePools (in-place)")
 		machinesBeforeUpgrade := getMachinesByCluster(ctx, input.BootstrapClusterProxy.GetClient(), clusterResources.Cluster)
 		By("Modifying the control plane configuration via Cluster topology and wait for changes to be applied to the control plane object (in-place)")
 		modifyControlPlaneViaClusterAndWait(ctx, modifyControlPlaneViaClusterAndWaitInput{
@@ -184,6 +186,25 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 			},
 			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
 		})
+		By("Modifying the MachinePool configuration via Cluster topology and wait for changes to be applied to the MachinePool (in-place)")
+		modifyMachinePoolViaClusterAndWait(ctx, modifyMachinePoolViaClusterAndWaitInput{
+			ClusterProxy: input.BootstrapClusterProxy,
+			Cluster:      clusterResources.Cluster,
+			ModifyMachinePoolTopology: func(topology *clusterv1.MachinePoolTopology) {
+				// Drop existing labels and annotations and set new ones.
+				topology.Metadata.Labels = map[string]string{
+					"Cluster.topology.machinePool.newLabel": "Cluster.topology.machinePool.newLabelValue",
+				}
+				topology.Metadata.Annotations = map[string]string{
+					"Cluster.topology.machinePool.newAnnotation": "Cluster.topology.machinePool.newAnnotationValue",
+				}
+				topology.NodeDrainTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}        //nolint:gosec
+				topology.NodeDeletionTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second}     //nolint:gosec
+				topology.NodeVolumeDetachTimeout = &metav1.Duration{Duration: time.Duration(rand.Intn(20)) * time.Second} //nolint:gosec
+				topology.MinReadySeconds = pointer.Int32(rand.Int31n(20)) //nolint:gosec
+			},
+			WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+		})
 		By("Verifying there are no unexpected rollouts through in-place rollout")
 		Consistently(func(g Gomega) {
 			machinesAfterUpgrade := getMachinesByCluster(ctx, input.BootstrapClusterProxy.GetClient(), clusterResources.Cluster)
@@ -191,7 +212,7 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 		}, 30*time.Second, 1*time.Second).Should(Succeed())
 		assertClusterObjects(ctx, input.BootstrapClusterProxy, clusterResources.Cluster, clusterResources.ClusterClass)
 
-		By("Rolling out changes to control plane and MachineDeployments (rollout)")
+		By("Rolling out changes to control plane, MachineDeployments, and MachinePools (rollout)")
 		machinesBeforeUpgrade = getMachinesByCluster(ctx, input.BootstrapClusterProxy.GetClient(), clusterResources.Cluster)
 		By("Modifying the control plane configuration via ClusterClass and wait for changes to be applied to the control plane object (rollout)")
 		modifyControlPlaneViaClusterClassAndWait(ctx, modifyClusterClassControlPlaneAndWaitInput{
@@ -213,6 +234,16 @@ func ClusterClassRolloutSpec(ctx context.Context, inputGetter func() ClusterClas
 			},
 			WaitForMachineDeployments: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
 		})
+		By("Modifying the MachinePool configuration via ClusterClass and wait for changes to be applied to the MachinePools (rollout)")
+		modifyMachinePoolViaClusterClassAndWait(ctx, modifyMachinePoolViaClusterClassAndWaitInput{
+			ClusterProxy: input.BootstrapClusterProxy,
+			ClusterClass: clusterResources.ClusterClass,
+			Cluster:      clusterResources.Cluster,
+			ModifyBootstrapConfigTemplateFields: map[string]interface{}{
+				"spec.template.spec.verbosity": int64(4),
+			},
+			WaitForMachinePools: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
+		})
 		By("Verifying all Machines are replaced through rollout")
 		Eventually(func(g Gomega) {
 			machinesAfterUpgrade := getMachinesByCluster(ctx, input.BootstrapClusterProxy.GetClient(), clusterResources.Cluster)
@@ -278,6 +309,9 @@ func assertClusterObjects(ctx context.Context, clusterProxy framework.ClusterPro
 		assertMachineSets(g, clusterObjects, cluster)
 		assertMachineSetsMachines(g, clusterObjects, cluster)
 
+		// MachinePools
+		assertMachinePools(g, clusterClassObjects, clusterObjects, cluster, clusterClass)
+
 		By("All cluster objects have the right labels, annotations and selectors")
 	}, 30*time.Second, 1*time.Second).Should(Succeed())
 }
@@ -590,6 +624,119 @@ func assertMachineDeployments(g Gomega, clusterClassObjects clusterClassObjects,
 	}
 }
 
+func assertMachinePools(g Gomega, clusterClassObjects clusterClassObjects, clusterObjects clusterObjects, cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) {
+	for _, machinePool := range clusterObjects.MachinePools {
+		mpTopology := getMPTopology(cluster, machinePool)
+		mpClass := getMPClass(cluster, clusterClass, machinePool)
+
+		// MachinePool.metadata
+		g.Expect(machinePool.Labels).To(BeEquivalentTo(
+			union(
+				map[string]string{
+					clusterv1.ClusterNameLabel:                    cluster.Name,
+					clusterv1.ClusterTopologyOwnedLabel:           "",
+					clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
+				},
+				mpTopology.Metadata.Labels,
+				mpClass.Template.Metadata.Labels,
+			),
+		))
+		g.Expect(
+			union(
+				machinePool.Annotations,
+			).without(g, clusterv1.RevisionAnnotation),
+		).To(BeEquivalentTo(
+			union(
+				mpTopology.Metadata.Annotations,
+				mpClass.Template.Metadata.Annotations,
+			),
+		))
+
+		// MachinePool.spec.template.metadata
+		g.Expect(machinePool.Spec.Template.Labels).To(BeEquivalentTo(
+			union(
+				map[string]string{
+					clusterv1.ClusterNameLabel:                    cluster.Name,
+					clusterv1.ClusterTopologyOwnedLabel:           "",
+					clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
+				},
+				mpTopology.Metadata.Labels,
+				mpClass.Template.Metadata.Labels,
+			),
+		))
+		g.Expect(machinePool.Spec.Template.Annotations).To(BeEquivalentTo(
+			union(
+				mpTopology.Metadata.Annotations,
+				mpClass.Template.Metadata.Annotations,
+			),
+		))
+
+		// MachinePool InfrastructureMachinePoolTemplate.metadata
+		ccInfrastructureMachinePoolTemplate := clusterClassObjects.InfrastructureMachinePoolTemplateByMachinePoolClass[mpClass.Class]
+		ccInfrastructureMachinePoolTemplateTemplateMetadata := mustMetadata(contract.InfrastructureMachineTemplate().Template().Metadata().Get(ccInfrastructureMachinePoolTemplate))
+		infrastructureMachinePoolTemplate := clusterObjects.InfrastructureMachinePoolTemplateByMachinePool[machinePool.Name]
+		infrastructureMachinePoolTemplateTemplateMetadata := mustMetadata(contract.InfrastructureMachineTemplate().Template().Metadata().Get(infrastructureMachinePoolTemplate))
+		g.Expect(infrastructureMachinePoolTemplate.GetLabels()).To(BeEquivalentTo(
+			union(
+				map[string]string{
+					clusterv1.ClusterNameLabel:                    cluster.Name,
+					clusterv1.ClusterTopologyOwnedLabel:           "",
+					clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
+				},
+				ccInfrastructureMachinePoolTemplate.GetLabels(),
+			),
+		))
+		g.Expect(infrastructureMachinePoolTemplate.GetAnnotations()).To(BeEquivalentTo(
+			union(
+				map[string]string{
+					clusterv1.TemplateClonedFromGroupKindAnnotation: groupKind(mpClass.Template.Infrastructure.Ref),
+					clusterv1.TemplateClonedFromNameAnnotation:      mpClass.Template.Infrastructure.Ref.Name,
+				},
+				ccInfrastructureMachinePoolTemplate.GetAnnotations(),
+			).without(g, corev1.LastAppliedConfigAnnotation),
+		))
+		// MachinePool InfrastructureMachinePoolTemplate.spec.template.metadata
+		g.Expect(infrastructureMachinePoolTemplateTemplateMetadata.Labels).To(BeEquivalentTo(
+			ccInfrastructureMachinePoolTemplateTemplateMetadata.Labels,
+		))
+		g.Expect(infrastructureMachinePoolTemplateTemplateMetadata.Annotations).To(BeEquivalentTo(
+			ccInfrastructureMachinePoolTemplateTemplateMetadata.Annotations,
+		))
+
+		// MachinePool BootstrapConfigTemplate.metadata
+		ccBootstrapConfigTemplate := clusterClassObjects.BootstrapConfigTemplateByMachinePoolClass[mpClass.Class]
+		ccBootstrapConfigTemplateTemplateMetadata := mustMetadata(contract.BootstrapConfigTemplate().Template().Metadata().Get(ccBootstrapConfigTemplate))
+		bootstrapConfigTemplate := clusterObjects.BootstrapConfigTemplateByMachinePool[machinePool.Name]
+		bootstrapConfigTemplateTemplateMetadata := mustMetadata(contract.BootstrapConfigTemplate().Template().Metadata().Get(bootstrapConfigTemplate))
+		g.Expect(bootstrapConfigTemplate.GetLabels()).To(BeEquivalentTo(
+			union(
+				map[string]string{
+					clusterv1.ClusterNameLabel:                    cluster.Name,
+					clusterv1.ClusterTopologyOwnedLabel:           "",
+					clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
+				},
+				ccBootstrapConfigTemplate.GetLabels(),
+			),
+		))
+		g.Expect(bootstrapConfigTemplate.GetAnnotations()).To(BeEquivalentTo(
+			union(
+				map[string]string{
+					clusterv1.TemplateClonedFromGroupKindAnnotation: groupKind(mpClass.Template.Bootstrap.Ref),
+					clusterv1.TemplateClonedFromNameAnnotation:      mpClass.Template.Bootstrap.Ref.Name,
+				},
+				ccBootstrapConfigTemplate.GetAnnotations(),
+			).without(g, corev1.LastAppliedConfigAnnotation),
+		))
+		// MachinePool BootstrapConfigTemplate.spec.template.metadata
+		g.Expect(bootstrapConfigTemplateTemplateMetadata.Labels).To(BeEquivalentTo(
+			ccBootstrapConfigTemplateTemplateMetadata.Labels,
+		))
+		g.Expect(bootstrapConfigTemplateTemplateMetadata.Annotations).To(BeEquivalentTo(
+			ccBootstrapConfigTemplateTemplateMetadata.Annotations,
+		))
+	}
+}
+
 func assertMachineSets(g Gomega, clusterObjects clusterObjects, cluster *clusterv1.Cluster) {
 	for _, machineDeployment := range clusterObjects.MachineDeployments {
 		mdTopology := getMDTopology(cluster, machineDeployment)
@@ -788,6 +935,30 @@ func getMDTopology(cluster *clusterv1.Cluster, md *clusterv1.MachineDeployment)
 	return nil
 }
 
+// getMPClass looks up the MachinePoolClass for a MachinePool in the ClusterClass.
+func getMPClass(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass, mp *expv1.MachinePool) *clusterv1.MachinePoolClass {
+	mpTopology := getMPTopology(cluster, mp)
+
+	for _, mpClass := range clusterClass.Spec.Workers.MachinePools {
+		if mpClass.Class == mpTopology.Class {
+			return &mpClass
+		}
+	}
+	Fail(fmt.Sprintf("could not find MachinePool class %q", mpTopology.Class))
+	return nil
+}
+
+// getMPTopology looks up the MachinePoolTopology for a MachinePool in the Cluster.
+func getMPTopology(cluster *clusterv1.Cluster, mp *expv1.MachinePool) *clusterv1.MachinePoolTopology {
+	for _, mpTopology := range cluster.Spec.Topology.Workers.MachinePools {
+		if mpTopology.Name == mp.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] {
+			return &mpTopology
+		}
+	}
+	Fail(fmt.Sprintf("could not find MachinePool topology %q", mp.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]))
+	return nil
+}
+
 // groupKind returns the GroupKind string of a ref.
 func groupKind(ref *corev1.ObjectReference) string {
 	gv, err := schema.ParseGroupVersion(ref.APIVersion)
@@ -857,6 +1028,9 @@ type clusterClassObjects struct {
 
 	InfrastructureMachineTemplateByMachineDeploymentClass map[string]*unstructured.Unstructured
 	BootstrapConfigTemplateByMachineDeploymentClass       map[string]*unstructured.Unstructured
+
+	InfrastructureMachinePoolTemplateByMachinePoolClass map[string]*unstructured.Unstructured
+	BootstrapConfigTemplateByMachinePoolClass           map[string]*unstructured.Unstructured
 }
 
 // getClusterClassObjects retrieves objects from the ClusterClass.
@@ -903,9 +1077,14 @@ type clusterObjects struct {
 	MachinesByMachineSet map[string][]*clusterv1.Machine
 	NodesByMachine       map[string]*corev1.Node
 
+	MachinePools []*expv1.MachinePool
+
 	InfrastructureMachineTemplateByMachineDeployment map[string]*unstructured.Unstructured
 	BootstrapConfigTemplateByMachineDeployment       map[string]*unstructured.Unstructured
 
+	InfrastructureMachinePoolTemplateByMachinePool map[string]*unstructured.Unstructured
+	BootstrapConfigTemplateByMachinePool           map[string]*unstructured.Unstructured
+
 	InfrastructureMachineByMachine map[string]*unstructured.Unstructured
 	BootstrapConfigByMachine       map[string]*unstructured.Unstructured
 }
@@ -1095,3 +1274,49 @@ func modifyMachineDeploymentViaClusterAndWait(ctx context.Context, input modifyM
 		}
 	}
 }
+
+// modifyMachinePoolViaClusterAndWaitInput is the input type for modifyMachinePoolViaClusterAndWait.
+type modifyMachinePoolViaClusterAndWaitInput struct {
+	ClusterProxy              framework.ClusterProxy
+	Cluster                   *clusterv1.Cluster
+	ModifyMachinePoolTopology func(topology *clusterv1.MachinePoolTopology)
+	WaitForMachinePools       []interface{}
+}
+
+// modifyMachinePoolViaClusterAndWait modifies the MachinePoolTopology of a Cluster topology via ModifyMachinePoolTopology.
+// It then waits until the changes are rolled out to the MachinePools of the Cluster.
+// NOTE: This helper is really specific to this test, so we are keeping this private vs. adding it to the framework.
+func modifyMachinePoolViaClusterAndWait(ctx context.Context, input modifyMachinePoolViaClusterAndWaitInput) {
+	Expect(ctx).NotTo(BeNil(), "ctx is required for modifyMachinePoolViaClusterAndWait")
+	Expect(input.ClusterProxy).ToNot(BeNil(), "Invalid argument. input.ClusterProxy can't be nil when calling modifyMachinePoolViaClusterAndWait")
+	Expect(input.Cluster).ToNot(BeNil(), "Invalid argument. input.Cluster can't be nil when calling modifyMachinePoolViaClusterAndWait")
+
+	mgmtClient := input.ClusterProxy.GetClient()
+
+	for i, mpTopology := range input.Cluster.Spec.Topology.Workers.MachinePools {
+		log.Logf("Modifying the MachinePool topology %q of Cluster %s", mpTopology.Name, klog.KObj(input.Cluster))
+
+		// Patch the MachinePool topology in the Cluster.
+		patchHelper, err := patch.NewHelper(input.Cluster, mgmtClient)
+		Expect(err).ToNot(HaveOccurred())
+		input.ModifyMachinePoolTopology(&input.Cluster.Spec.Topology.Workers.MachinePools[i])
+		Expect(patchHelper.Patch(ctx, input.Cluster)).To(Succeed())
+
+		for _, mpTopology := range input.Cluster.Spec.Topology.Workers.MachinePools {
+			// NOTE: We only wait until the change is rolled out to the MachinePools and not to the worker machines.
+			log.Logf("Waiting for MachinePool rollout for MachinePoolTopology %q to complete.", mpTopology.Name)
+			Eventually(func(g Gomega) {
+				// Get the MachinePool for the current MachinePoolTopology.
+				mpList := &expv1.MachinePoolList{}
+				g.Expect(mgmtClient.List(ctx, mpList, client.InNamespace(input.Cluster.Namespace), client.MatchingLabels{
+					clusterv1.ClusterTopologyMachinePoolNameLabel: mpTopology.Name,
+				})).To(Succeed())
+				g.Expect(mpList.Items).To(HaveLen(1), fmt.Sprintf("expected one MachinePool for topology %q, but got %d", mpTopology.Name, len(mpList.Items)))
+				mp := mpList.Items[0]
+
+				// Verify that the fields from the Cluster topology are set on the MachinePool.
+				assertMachinePoolTopologyFields(g, mp, mpTopology)
+			}, input.WaitForMachinePools...).Should(Succeed())
+		}
+	}
+}
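Both helpers added to this file repeat the same lookup: list MachinePools filtered by the topology-name label and require exactly one match. If the pattern spreads further, it could be factored out along these lines (a hypothetical refactoring sketch, not part of the change):

    package e2e

    import (
    	"context"
    	"fmt"

    	"sigs.k8s.io/controller-runtime/pkg/client"

    	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
    	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
    )

    // getMPByTopologyName returns the single MachinePool created for a
    // MachinePoolTopology, identified by the topology-name label that the
    // topology controller sets on the MachinePool.
    func getMPByTopologyName(ctx context.Context, c client.Client, namespace, topologyName string) (*expv1.MachinePool, error) {
    	mpList := &expv1.MachinePoolList{}
    	if err := c.List(ctx, mpList,
    		client.InNamespace(namespace),
    		client.MatchingLabels{clusterv1.ClusterTopologyMachinePoolNameLabel: topologyName},
    	); err != nil {
    		return nil, err
    	}
    	if len(mpList.Items) != 1 {
    		return nil, fmt.Errorf("expected one MachinePool for topology %q, but got %d", topologyName, len(mpList.Items))
    	}
    	return &mpList.Items[0], nil
    }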
diff --git a/test/e2e/self_hosted.go b/test/e2e/self_hosted.go
index 0924ac0e9daa..484dea5a0943 100644
--- a/test/e2e/self_hosted.go
+++ b/test/e2e/self_hosted.go
@@ -365,19 +365,18 @@ func SelfHostedSpec(ctx context.Context, inputGetter func() SelfHostedSpecInput)
 				MachineDeployments:          clusterResources.MachineDeployments,
 				WaitForMachinesToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-worker-nodes"),
 			})
-		}
-	}
 
-	// Only attempt to upgrade MachinePools if they were provided in the template.
-	if len(clusterResources.MachinePools) > 0 && workerMachineCount > 0 {
-		By("Upgrading the machinepool instances")
-		framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
-			ClusterProxy:                   selfHostedClusterProxy,
-			Cluster:                        clusterResources.Cluster,
-			UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
-			WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
-			MachinePools:                   clusterResources.MachinePools,
-		})
+			if len(clusterResources.MachinePools) > 0 {
+				By("Upgrading the machinepool instances")
+				framework.UpgradeMachinePoolAndWait(ctx, framework.UpgradeMachinePoolAndWaitInput{
+					ClusterProxy:                   selfHostedClusterProxy,
+					Cluster:                        clusterResources.Cluster,
+					UpgradeVersion:                 input.E2EConfig.GetVariable(KubernetesVersionUpgradeTo),
+					WaitForMachinePoolToBeUpgraded: input.E2EConfig.GetIntervals(specName, "wait-machine-pool-upgrade"),
+					MachinePools:                   clusterResources.MachinePools,
+				})
+			}
+		}
 	}
 
 	By("Waiting until nodes are ready")
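A closing note on the Gomega polling style used throughout these specs: `Eventually` accepts either a plain function whose returned error is matched directly (e.g. with `BeNil()`), or a function that takes a `Gomega` and makes assertions inline, which is matched with `Succeed()`. A generic, self-contained illustration of both forms (the `check`/`assert` callbacks are placeholders):

    package e2e

    import (
    	. "github.com/onsi/gomega"
    )

    func examplePolling(check func() error, assert func(g Gomega)) {
    	// Form 1: poll a function returning an error; nil means success.
    	Eventually(func() error {
    		return check()
    	}, "30s", "1s").Should(Succeed())

    	// Form 2: poll a Gomega-aware function; a failed g.Expect marks the
    	// attempt as failed and Eventually retries until the timeout.
    	Eventually(func(g Gomega) {
    		assert(g)
    	}, "30s", "1s").Should(Succeed())
    }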