From da986d5f97b05dc9479fcfdf5edac3ca812eff37 Mon Sep 17 00:00:00 2001 From: killianmuldoon Date: Thu, 10 Aug 2023 19:04:59 +0100 Subject: [PATCH] Add ownerReference resilience test Signed-off-by: killianmuldoon --- controllers/vspherecluster_reconciler.go | 70 +++-- pkg/identity/identity.go | 2 +- test/e2e/README.md | 42 +-- test/e2e/capv_clusterclass_quickstart_test.go | 36 --- test/e2e/capv_quick_start_test.go | 58 ++++ test/e2e/govmomi_test.go | 1 + test/helpers/ownerreference_helpers.go | 287 ++++++++++++++++++ 7 files changed, 407 insertions(+), 89 deletions(-) delete mode 100644 test/e2e/capv_clusterclass_quickstart_test.go create mode 100644 test/helpers/ownerreference_helpers.go diff --git a/controllers/vspherecluster_reconciler.go b/controllers/vspherecluster_reconciler.go index b17588bdb1..705be41923 100644 --- a/controllers/vspherecluster_reconciler.go +++ b/controllers/vspherecluster_reconciler.go @@ -284,38 +284,46 @@ func (r clusterReconciler) reconcileNormal(ctx *context.ClusterContext) (reconci func (r clusterReconciler) reconcileIdentitySecret(ctx *context.ClusterContext) error { vsphereCluster := ctx.VSphereCluster - if identity.IsSecretIdentity(vsphereCluster) { - secret := &corev1.Secret{} - secretKey := client.ObjectKey{ - Namespace: vsphereCluster.Namespace, - Name: vsphereCluster.Spec.IdentityRef.Name, - } - err := ctx.Client.Get(ctx, secretKey, secret) - if err != nil { - return err - } + if !identity.IsSecretIdentity(vsphereCluster) { + return nil + } + secret := &corev1.Secret{} + secretKey := client.ObjectKey{ + Namespace: vsphereCluster.Namespace, + Name: vsphereCluster.Spec.IdentityRef.Name, + } + err := ctx.Client.Get(ctx, secretKey, secret) + if err != nil { + return err + } - // check if cluster is already an owner - if !clusterutilv1.IsOwnedByObject(secret, vsphereCluster) { - ownerReferences := secret.GetOwnerReferences() - if identity.IsOwnedByIdentityOrCluster(ownerReferences) { - return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name) - } - ownerReferences = append(ownerReferences, metav1.OwnerReference{ - APIVersion: infrav1.GroupVersion.String(), - Kind: vsphereCluster.Kind, - Name: vsphereCluster.Name, - UID: vsphereCluster.UID, - }) - secret.SetOwnerReferences(ownerReferences) - } - if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { - ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) - } - err = r.Client.Update(ctx, secret) - if err != nil { - return err - } + // If a different VSphereCluster is an owner return an error. + if !clusterutilv1.IsOwnedByObject(secret, vsphereCluster) && identity.IsOwnedByIdentityOrCluster(secret.GetOwnerReferences()) { + return fmt.Errorf("another cluster has set the OwnerRef for secret: %s/%s", secret.Namespace, secret.Name) + } + + helper, err := patch.NewHelper(secret, ctx.Client) + if err != nil { + return err + } + + // Ensure the VSphereCluster is an owner and that the APIVersion is up to date. + secret.SetOwnerReferences(clusterutilv1.EnsureOwnerRef(secret.GetOwnerReferences(), + metav1.OwnerReference{ + APIVersion: infrav1.GroupVersion.String(), + Kind: vsphereCluster.Kind, + Name: vsphereCluster.Name, + UID: vsphereCluster.UID, + }, + )) + + // Ensure the finalizer is added. 
+ if !ctrlutil.ContainsFinalizer(secret, infrav1.SecretIdentitySetFinalizer) { + ctrlutil.AddFinalizer(secret, infrav1.SecretIdentitySetFinalizer) + } + err = helper.Patch(ctx, secret) + if err != nil { + return err } return nil diff --git a/pkg/identity/identity.go b/pkg/identity/identity.go index ced2487c92..1dc6b627b4 100644 --- a/pkg/identity/identity.go +++ b/pkg/identity/identity.go @@ -122,11 +122,11 @@ func validateInputs(c client.Client, cluster *infrav1.VSphereCluster) error { return nil } +// IsSecretIdentity returns true if the VsphereCluster identity is a Secret. func IsSecretIdentity(cluster *infrav1.VSphereCluster) bool { if cluster == nil || cluster.Spec.IdentityRef == nil { return false } - return cluster.Spec.IdentityRef.Kind == infrav1.SecretKind } diff --git a/test/e2e/README.md b/test/e2e/README.md index a5c3a3225b..d56214b42d 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -14,36 +14,36 @@ In order to run the e2e tests the following requirements must be met: * The testing must occur on a host that can access the VMs deployed to vSphere via the network * Ginkgo ([download](https://onsi.github.io/ginkgo/#getting-ginkgo)) * Docker ([download](https://www.docker.com/get-started)) -* Kind v0.7.0+ ([download](https://kind.sigs.k8s.io)) +* Kind v0.20.0+ ([download](https://kind.sigs.k8s.io)) ### Environment variables The first step to running the e2e tests is setting up the required environment variables: -| Environment variable | Description | Example | -| ----------------------------- | ----------------------------------------------------------------------------------- | -------------------------------------------------------------------------------- | -| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` | -| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` | -| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` | -| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` | -| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` | -| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` | -| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datstore/my-datastore` | -| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` | -| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` | -| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` | -| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` | -| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` | -| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` | +| Environment variable | Description | Example | 
+|------------------------------|-------------------------------------------------------------------------------------|----------------------------------------------------------------------------------|
+| `VSPHERE_SERVER` | The IP address or FQDN of a vCenter 6.7u3 server | `my.vcenter.com` |
+| `VSPHERE_USERNAME` | The username used to access the vSphere server | `my-username` |
+| `VSPHERE_PASSWORD` | The password used to access the vSphere server | `my-password` |
+| `VSPHERE_DATACENTER` | The unique name or inventory path of the datacenter in which VMs will be created | `my-datacenter` or `/my-datacenter` |
+| `VSPHERE_FOLDER` | The unique name or inventory path of the folder in which VMs will be created | `my-folder` or `/my-datacenter/vm/my-folder` |
+| `VSPHERE_RESOURCE_POOL` | The unique name or inventory path of the resource pool in which VMs will be created | `my-resource-pool` or `/my-datacenter/host/Cluster-1/Resources/my-resource-pool` |
+| `VSPHERE_DATASTORE` | The unique name or inventory path of the datastore in which VMs will be created | `my-datastore` or `/my-datacenter/datastore/my-datastore` |
+| `VSPHERE_NETWORK` | The unique name or inventory path of the network to which VMs will be connected | `my-network` or `/my-datacenter/network/my-network` |
+| `VSPHERE_SSH_PRIVATE_KEY` | The file path of the private key used to ssh into the CAPV VMs | `/home/foo/bar-ssh.key` |
+| `VSPHERE_SSH_AUTHORIZED_KEY` | The public key that is added to the CAPV VMs | `ssh-rsa ABCDEF...XYZ=` |
+| `VSPHERE_TLS_THUMBPRINT` | The TLS thumbprint of the vSphere server's certificate which should be trusted | `2A:3F:BC:CA:C0:96:35:D4:B7:A2:AA:3C:C1:33:D9:D7:BE:EC:31:55` |
+| `CONTROL_PLANE_ENDPOINT_IP` | The IP that kube-vip should use as a control plane endpoint | `10.10.123.100` |
+| `VSPHERE_STORAGE_POLICY` | The name of an existing vSphere storage policy to be assigned to created VMs | `my-test-sp` |

### Flags

| Flag | Description | Default Value |
-|-------------------------|----------------------------------------------------------------------------------------------------------|-----------|
-| `SKIP_RESOURCE_CLEANUP` | This flags skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` |
-| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` |
-| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` |
-| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` |
+|-------------------------|-----------------------------------------------------------------------------------------------------------|---------------|
+| `SKIP_RESOURCE_CLEANUP` | This flag skips cleanup of the resources created during the tests as well as the kind/bootstrap cluster | `false` |
+| `USE_EXISTING_CLUSTER` | This flag enables the usage of an existing K8S cluster as the management cluster to run tests against. | `false` |
+| `GINKGO_TEST_TIMEOUT` | This sets the timeout for the E2E test suite. | `2h` |
+| `GINKGO_FOCUS` | This populates the `-focus` flag of the `ginkgo` run command. | `""` |

### Running the e2e tests

diff --git a/test/e2e/capv_clusterclass_quickstart_test.go b/test/e2e/capv_clusterclass_quickstart_test.go
deleted file mode 100644
index b44851fa7c..0000000000
--- a/test/e2e/capv_clusterclass_quickstart_test.go
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
-Copyright 2022 The Kubernetes Authors.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package e2e - -import ( - . "github.com/onsi/ginkgo/v2" - "k8s.io/utils/pointer" - capi_e2e "sigs.k8s.io/cluster-api/test/e2e" -) - -var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-Blocking] [ClusterClass]", func() { - capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { - return capi_e2e.QuickStartSpecInput{ - E2EConfig: e2eConfig, - ClusterctlConfigPath: clusterctlConfigPath, - BootstrapClusterProxy: bootstrapClusterProxy, - ArtifactFolder: artifactFolder, - SkipCleanup: skipCleanup, - Flavor: pointer.String("topology"), - } - }) -}) diff --git a/test/e2e/capv_quick_start_test.go b/test/e2e/capv_quick_start_test.go index 0b6b7abc79..84a02d9532 100644 --- a/test/e2e/capv_quick_start_test.go +++ b/test/e2e/capv_quick_start_test.go @@ -18,7 +18,11 @@ package e2e import ( . "github.com/onsi/ginkgo/v2" + "k8s.io/utils/pointer" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" + "sigs.k8s.io/cluster-api/test/framework" + + "sigs.k8s.io/cluster-api-provider-vsphere/test/helpers" ) var _ = Describe("Cluster Creation using Cluster API quick-start test [PR-Blocking]", func() { @@ -29,6 +33,60 @@ var _ = Describe("Cluster Creation using Cluster API quick-start test [PR-Blocki BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, + PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { + // This check ensures that owner references are resilient - i.e. correctly re-reconciled - when removed. + framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName, + framework.CoreOwnerReferenceAssertion, + framework.KubeadmBootstrapOwnerReferenceAssertions, + framework.KubeadmControlPlaneOwnerReferenceAssertions, + helpers.VSphereKubernetesReferenceAssertions, + helpers.VSphereExpOwnerReferenceAssertions, + helpers.VSphereReferenceAssertions, + ) + + // This check ensures that owner references are always updated to the most recent apiVersion. + helpers.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName, + framework.CoreOwnerReferenceAssertion, + framework.KubeadmBootstrapOwnerReferenceAssertions, + framework.KubeadmControlPlaneOwnerReferenceAssertions, + helpers.VSphereKubernetesReferenceAssertions, + helpers.VSphereExpOwnerReferenceAssertions, + helpers.VSphereReferenceAssertions, + ) + }, + } + }) +}) + +var _ = Describe("ClusterClass Creation using Cluster API quick-start test [PR-Blocking] [ClusterClass]", func() { + capi_e2e.QuickStartSpec(ctx, func() capi_e2e.QuickStartSpecInput { + return capi_e2e.QuickStartSpecInput{ + E2EConfig: e2eConfig, + ClusterctlConfigPath: clusterctlConfigPath, + BootstrapClusterProxy: bootstrapClusterProxy, + ArtifactFolder: artifactFolder, + SkipCleanup: skipCleanup, + Flavor: pointer.String("topology"), + PostMachinesProvisioned: func(proxy framework.ClusterProxy, namespace, clusterName string) { + // This check ensures that owner references are resilient - i.e. 
correctly re-reconciled - when removed.
+				framework.ValidateOwnerReferencesResilience(ctx, proxy, namespace, clusterName,
+					framework.CoreOwnerReferenceAssertion,
+					framework.KubeadmBootstrapOwnerReferenceAssertions,
+					framework.KubeadmControlPlaneOwnerReferenceAssertions,
+					helpers.VSphereKubernetesReferenceAssertions,
+					helpers.VSphereExpOwnerReferenceAssertions,
+					helpers.VSphereReferenceAssertions,
+				)
+				// This check ensures that owner references are always updated to the most recent apiVersion.
+				helpers.ValidateOwnerReferencesOnUpdate(ctx, proxy, namespace, clusterName,
+					framework.CoreOwnerReferenceAssertion,
+					framework.KubeadmBootstrapOwnerReferenceAssertions,
+					framework.KubeadmControlPlaneOwnerReferenceAssertions,
+					helpers.VSphereKubernetesReferenceAssertions,
+					helpers.VSphereExpOwnerReferenceAssertions,
+					helpers.VSphereReferenceAssertions,
+				)
+			},
 		}
 	})
 })
diff --git a/test/e2e/govmomi_test.go b/test/e2e/govmomi_test.go
index 017c8fdea0..14828a772b 100644
--- a/test/e2e/govmomi_test.go
+++ b/test/e2e/govmomi_test.go
@@ -55,6 +55,7 @@ func initVSphereSession() {
 	By("parsing vSphere server URL")
 	serverURL, err := soap.ParseURL(vsphereServer)
 	Expect(err).ShouldNot(HaveOccurred())
+	Expect(serverURL).ToNot(BeNil())
 
 	var vimClient *vim25.Client
 
diff --git a/test/helpers/ownerreference_helpers.go b/test/helpers/ownerreference_helpers.go
new file mode 100644
index 0000000000..de8dd2579c
--- /dev/null
+++ b/test/helpers/ownerreference_helpers.go
@@ -0,0 +1,287 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package helpers
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+	"sort"
+	"time"
+
+	. "github.com/onsi/gomega"
+	"github.com/pkg/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	kerrors "k8s.io/apimachinery/pkg/util/errors"
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
+	bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1"
+	clusterctlcluster "sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
+	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1"
+	addonsv1 "sigs.k8s.io/cluster-api/exp/addons/api/v1beta1"
+	"sigs.k8s.io/cluster-api/test/framework"
+	"sigs.k8s.io/cluster-api/util/patch"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	infrav1 "sigs.k8s.io/cluster-api-provider-vsphere/apis/v1beta1"
+)
+
+// VSphereKubernetesReferenceAssertions maps Kubernetes object kinds to functions which return an error if the passed OwnerReferences
+// aren't as expected.
+var (
+	VSphereKubernetesReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
+		secretKind: func(owners []metav1.OwnerReference) error {
+			// Secrets for cluster certificates must be owned by the KubeadmControlPlane, the bootstrap secret by a KubeadmConfig, secrets created for a ClusterResourceSet by the ClusterResourceSet, and the vSphere credentials secret by the VSphereCluster.
+			return HasOneOfExactOwnersByGVK(owners,
+				[]schema.GroupVersionKind{kubeadmControlPlaneGVK},
+				[]schema.GroupVersionKind{kubeadmConfigGVK},
+				[]schema.GroupVersionKind{clusterResourceSetGVK},
+				[]schema.GroupVersionKind{vSphereClusterGVK})
+		},
+		configMapKind: func(owners []metav1.OwnerReference) error {
+			// The only configMaps considered here are those owned by a ClusterResourceSet.
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{clusterResourceSetGVK})
+		},
+	}
+
+	VSphereExpOwnerReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
+		clusterResourceSetKind: func(owners []metav1.OwnerReference) error {
+			// ClusterResourceSet doesn't have ownerReferences (it is a clusterctl move-hierarchy root).
+			return HasExactOwnersByGVK(owners, []schema.GroupVersionKind{})
+		},
+		// ClusterResourceSetBinding is owned by the ClusterResourceSets it belongs to on creation.
+		clusterResourceSetBindingKind: func(owners []metav1.OwnerReference) error {
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{clusterResourceSetGVK}, []schema.GroupVersionKind{clusterResourceSetGVK, clusterResourceSetGVK})
+		},
+		// MachinePool must be owned by a Cluster.
+		machinePoolKind: func(owners []metav1.OwnerReference) error {
+			// MachinePools must be owned by a Cluster.
+			return HasExactOwnersByGVK(owners, []schema.GroupVersionKind{clusterGVK})
+		},
+	}
+)
+
+var (
+	VSphereClusterIdentityKind = "VSphereClusterIdentity"
+	VSphereFailureDomainsKind  = "VSphereFailureDomain"
+	vSphereDeploymentZonesKind = "VSphereDeploymentZone"
+
+	vSphereClusterKind         = "VSphereCluster"
+	vSphereClusterTemplateKind = "VSphereClusterTemplate"
+
+	vSphereMachineKind         = "VSphereMachine"
+	vSphereMachineTemplateKind = "VSphereMachineTemplate"
+	vSphereVMKind              = "VSphereVM"
+
+	vSphereMachineGVK = infrav1.GroupVersion.WithKind(vSphereMachineKind)
+
+	vSphereClusterGVK = infrav1.GroupVersion.WithKind(vSphereClusterKind)
+	VSphereReferenceAssertions = map[string]func([]metav1.OwnerReference) error{
+		vSphereClusterKind: func(owners []metav1.OwnerReference) error {
+			// The VSphereCluster must be owned by the Cluster.
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{clusterGVK})
+		},
+		vSphereClusterTemplateKind: func(owners []metav1.OwnerReference) error {
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{clusterClassGVK})
+		},
+		vSphereMachineKind: func(owners []metav1.OwnerReference) error {
+			// The VSphereMachine must be owned by both the VSphereCluster and a Machine.
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{vSphereClusterGVK, machineGVK})
+		},
+		vSphereMachineTemplateKind: func(owners []metav1.OwnerReference) error {
+			// The VSphereMachineTemplate is owned by the Cluster that references it, or by the ClusterClass when it is part of one.
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{clusterGVK}, []schema.GroupVersionKind{clusterClassGVK})
+		},
+		vSphereVMKind: func(owners []metav1.OwnerReference) error {
+			// The VSphereVM must be owned by a VSphereMachine.
+			return HasOneOfExactOwnersByGVK(owners, []schema.GroupVersionKind{vSphereMachineGVK})
+		},
+		VSphereClusterIdentityKind: func(owners []metav1.OwnerReference) error { return errors.New("IMPLEMENT ME") },
+		VSphereFailureDomainsKind:  func(owners []metav1.OwnerReference) error { return errors.New("IMPLEMENT ME") },
+		vSphereDeploymentZonesKind: func(owners []metav1.OwnerReference) error { return errors.New("IMPLEMENT ME") },
+	}
+)
+
+// HasExactOwnersByGVK returns an error if the GroupVersionKinds of the supplied OwnerReferences do not exactly match wantGVKLists, ignoring order.
+func HasExactOwnersByGVK(refList []metav1.OwnerReference, wantGVKLists []schema.GroupVersionKind) error {
+	refGVKs := []schema.GroupVersionKind{}
+	for _, ref := range refList {
+		refGVK, err := ownerRefGVK(ref)
+		if err != nil {
+			return err
+		}
+		refGVKs = append(refGVKs, refGVK)
+	}
+	sort.SliceStable(refGVKs, func(i int, j int) bool {
+		return refGVKs[i].String() > refGVKs[j].String()
+	})
+	sort.SliceStable(wantGVKLists, func(i int, j int) bool {
+		return wantGVKLists[i].String() > wantGVKLists[j].String()
+	})
+	if !reflect.DeepEqual(wantGVKLists, refGVKs) {
+		return fmt.Errorf("wanted %v, actual %v", wantGVKLists, refGVKs)
+	}
+	return nil
+}
+
+// HasOneOfExactOwnersByGVK returns an error if none of the possibleGVKLists matches the supplied OwnerReference list.
+// NOTE: we are using HasOneOfExactOwnersByGVK as a convenience approach for checking owner references on objects that
+// can have different owner references depending on the cluster topology.
+// In a follow-up iteration we can make improvements to check owner references according to the specific use cases vs checking generically "oneOf".
+func HasOneOfExactOwnersByGVK(refList []metav1.OwnerReference, possibleGVKLists ...[]schema.GroupVersionKind) error {
+	var allErrs []error
+	for _, wantGVK := range possibleGVKLists {
+		err := HasExactOwnersByGVK(refList, wantGVK)
+		if err != nil {
+			allErrs = append(allErrs, err)
+			continue
+		}
+		return nil
+	}
+	return kerrors.NewAggregate(allErrs)
+}
+
+func ownerRefGVK(ref metav1.OwnerReference) (schema.GroupVersionKind, error) {
+	refGV, err := schema.ParseGroupVersion(ref.APIVersion)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	return schema.GroupVersionKind{Version: refGV.Version, Group: refGV.Group, Kind: ref.Kind}, nil
+}
+
+var (
+	clusterKind      = "Cluster"
+	clusterClassKind = "ClusterClass"
+
+	machineKind = "Machine"
+
+	clusterGVK      = clusterv1.GroupVersion.WithKind(clusterKind)
+	clusterClassGVK = clusterv1.GroupVersion.WithKind(clusterClassKind)
+
+	machineGVK = clusterv1.GroupVersion.WithKind(machineKind)
+)
+
+var (
+	clusterResourceSetKind        = "ClusterResourceSet"
+	clusterResourceSetBindingKind = "ClusterResourceSetBinding"
+	machinePoolKind               = "MachinePool"
+
+	clusterResourceSetGVK = addonsv1.GroupVersion.WithKind(clusterResourceSetKind)
+)
+
+var (
+	configMapKind = "ConfigMap"
+	secretKind    = "Secret"
+)
+
+var (
+	kubeadmControlPlaneKind = "KubeadmControlPlane"
+
+	kubeadmControlPlaneGVK = controlplanev1.GroupVersion.WithKind(kubeadmControlPlaneKind)
+)
+
+var (
+	kubeadmConfigKind = "KubeadmConfig"
+
+	kubeadmConfigGVK = bootstrapv1.GroupVersion.WithKind(kubeadmConfigKind)
+)
+
+// ValidateOwnerReferencesOnUpdate checks that expected owner references are updated to the correct apiVersion.
+func ValidateOwnerReferencesOnUpdate(ctx context.Context, proxy framework.ClusterProxy, namespace, clusterName string, assertFuncs ...map[string]func(reference []metav1.OwnerReference) error) {
+	clusterKey := client.ObjectKey{Namespace: namespace, Name: clusterName}
+
+	// Pause the Cluster, change the version of all owner references to v1alpha1, then unpause; the apiVersion is expected to be updated after reconciliation.
+	setClusterPause(ctx, proxy.GetClient(), clusterKey, true)
+
+	// Change the version of the OwnerReferences on each object in the Graph to "v1alpha1".
+	changeOwnerReferencesAPIVersion(ctx, proxy, namespace)
+
+	setClusterPause(ctx, proxy.GetClient(), clusterKey, false)
+
+	// Annotate the ClusterClass, if one is in use, to speed up reconciliation. This ensures ClusterClass ownerReferences
+	// are re-reconciled before asserting the owner reference graph.
+	forceClusterClassReconcile(ctx, proxy.GetClient(), clusterKey)
+
+	// Force ClusterResourceSets to reconcile using an annotation. This ensures ClusterResourceSet ownerReferences
+	// are re-reconciled before asserting the owner reference graph.
+	forceClusterResourceSetReconcile(ctx, proxy.GetClient())
+
+	// Check that the ownerReferences have updated their apiVersions to current versions after reconciliation.
+	framework.AssertOwnerReferences(namespace, proxy.GetKubeconfigPath(), assertFuncs...)
+}
+
+func setClusterPause(ctx context.Context, cli client.Client, clusterKey types.NamespacedName, value bool) {
+	cluster := &clusterv1.Cluster{}
+	Expect(cli.Get(ctx, clusterKey, cluster)).To(Succeed())
+
+	pausePatch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"spec\":{\"paused\":%v}}", value)))
+	Expect(cli.Patch(ctx, cluster, pausePatch)).To(Succeed())
+}
+
+// forceClusterClassReconcile forces reconciliation of the ClusterClass associated with the Cluster if one exists. If the
+// Cluster has no ClusterClass this is a no-op.
+func forceClusterClassReconcile(ctx context.Context, cli client.Client, clusterKey types.NamespacedName) {
+	cluster := &clusterv1.Cluster{}
+	Expect(cli.Get(ctx, clusterKey, cluster)).To(Succeed())
+
+	if cluster.Spec.Topology != nil {
+		class := &clusterv1.ClusterClass{}
+		Expect(cli.Get(ctx, client.ObjectKey{Namespace: clusterKey.Namespace, Name: cluster.Spec.Topology.Class}, class)).To(Succeed())
+		annotationPatch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339))))
+		Expect(cli.Patch(ctx, class, annotationPatch)).To(Succeed())
+	}
+}
+
+// forceClusterResourceSetReconcile forces reconciliation of all ClusterResourceSets.
+func forceClusterResourceSetReconcile(ctx context.Context, cli client.Client) { + crsList := &addonsv1.ClusterResourceSetList{} + Expect(cli.List(ctx, crsList)).To(Succeed()) + for _, crs := range crsList.Items { + annotationPatch := client.RawPatch(types.MergePatchType, []byte(fmt.Sprintf("{\"metadata\":{\"annotations\":{\"cluster.x-k8s.io/modifiedAt\":\"%v\"}}}", time.Now().Format(time.RFC3339)))) + Expect(cli.Patch(ctx, &crs, annotationPatch)).To(Succeed()) + } +} + +func changeOwnerReferencesAPIVersion(ctx context.Context, proxy framework.ClusterProxy, namespace string) { + graph, err := clusterctlcluster.GetOwnerGraph(namespace, proxy.GetKubeconfigPath()) + Expect(err).ToNot(HaveOccurred()) + for _, object := range graph { + ref := object.Object + obj := new(unstructured.Unstructured) + obj.SetAPIVersion(ref.APIVersion) + obj.SetKind(ref.Kind) + obj.SetName(ref.Name) + + Expect(proxy.GetClient().Get(ctx, client.ObjectKey{Namespace: namespace, Name: object.Object.Name}, obj)).To(Succeed()) + helper, err := patch.NewHelper(obj, proxy.GetClient()) + Expect(err).ToNot(HaveOccurred()) + + newOwners := []metav1.OwnerReference{} + for _, owner := range obj.GetOwnerReferences() { + gv, err := schema.ParseGroupVersion(owner.APIVersion) + Expect(err).To(Succeed()) + gv.Version = "v1alpha1" + owner.APIVersion = gv.String() + newOwners = append(newOwners, owner) + } + + obj.SetOwnerReferences(newOwners) + Expect(helper.Patch(ctx, obj)).To(Succeed()) + } +}
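Reviewer note (not part of the patch): the sketch below is a minimal, hypothetical illustration of how the new owner-reference assertion helpers behave. It reuses the unexported GVK variables defined in test/helpers/ownerreference_helpers.go above, so it would have to live in that package (for example in a *_test.go file) to compile; the example function name and owner names are invented for illustration only.

```go
package helpers

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// ExampleHasOneOfExactOwnersByGVK (hypothetical) shows the "one of" semantics used by the
// assertions: an owner list matching any one of the allowed GVK sets passes, anything else
// is rejected with an aggregated error.
func ExampleHasOneOfExactOwnersByGVK() {
	// A Secret owned solely by a KubeadmControlPlane, as asserted for cluster certificate secrets.
	owners := []metav1.OwnerReference{{
		APIVersion: kubeadmControlPlaneGVK.GroupVersion().String(),
		Kind:       kubeadmControlPlaneGVK.Kind,
		Name:       "example-controlplane",
	}}

	// Matches the first allowed owner set, so no error is returned.
	err := HasOneOfExactOwnersByGVK(owners,
		[]schema.GroupVersionKind{kubeadmControlPlaneGVK},
		[]schema.GroupVersionKind{kubeadmConfigGVK})
	fmt.Println(err == nil)

	// An empty owner list matches none of the allowed sets, so an error is returned.
	err = HasOneOfExactOwnersByGVK(nil,
		[]schema.GroupVersionKind{kubeadmControlPlaneGVK},
		[]schema.GroupVersionKind{kubeadmConfigGVK})
	fmt.Println(err == nil)
	// Output:
	// true
	// false
}
```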