diff --git a/test/integration-test.mk b/test/integration-test.mk
index f6aa5e3e1..96a771513 100644
--- a/test/integration-test.mk
+++ b/test/integration-test.mk
@@ -2,7 +2,7 @@ TEST_TMP :=/tmp
 export KUBEBUILDER_ASSETS ?=$(TEST_TMP)/kubebuilder/bin
-K8S_VERSION ?=1.19.2
+K8S_VERSION ?=1.23.1
 KB_TOOLS_ARCHIVE_NAME :=kubebuilder-tools-$(K8S_VERSION)-$(GOHOSTOS)-$(GOHOSTARCH).tar.gz
 KB_TOOLS_ARCHIVE_PATH := $(TEST_TMP)/$(KB_TOOLS_ARCHIVE_NAME)
diff --git a/test/integration/deleteoption_test.go b/test/integration/deleteoption_test.go
new file mode 100644
index 000000000..020732a73
--- /dev/null
+++ b/test/integration/deleteoption_test.go
@@ -0,0 +1,545 @@
+package integration
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"github.com/onsi/ginkgo"
+	"github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilrand "k8s.io/apimachinery/pkg/util/rand"
+	workapiv1 "open-cluster-management.io/api/work/v1"
+	"open-cluster-management.io/work/pkg/spoke"
+	"open-cluster-management.io/work/test/integration/util"
+)
+
+var _ = ginkgo.Describe("ManifestWork Delete Option", func() {
+	var o *spoke.WorkloadAgentOptions
+	var cancel context.CancelFunc
+
+	var work *workapiv1.ManifestWork
+	var appliedManifestWorkName string
+	var manifests []workapiv1.Manifest
+
+	var err error
+
+	ginkgo.BeforeEach(func() {
+		o = spoke.NewWorkloadAgentOptions()
+		o.HubKubeconfigFile = hubKubeconfigFileName
+		o.SpokeClusterName = utilrand.String(5)
+		o.StatusSyncInterval = 3 * time.Second
+
+		ns := &corev1.Namespace{}
+		ns.Name = o.SpokeClusterName
+		_, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{})
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
+		var ctx context.Context
+		ctx, cancel = context.WithCancel(context.Background())
+		go startWorkAgent(ctx, o)
+
+		// reset manifests
+		manifests = nil
+	})
+
+	ginkgo.JustBeforeEach(func() {
+		work = util.NewManifestWork(o.SpokeClusterName, "", manifests)
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	})
+
+	ginkgo.AfterEach(func() {
+		if cancel != nil {
+			cancel()
+		}
+		err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), o.SpokeClusterName, metav1.DeleteOptions{})
+		gomega.Expect(err).ToNot(gomega.HaveOccurred())
+	})
+
+	ginkgo.Context("Resource sharing and adoption between manifestworks", func() {
+		var anotherWork *workapiv1.ManifestWork
+		var anotherAppliedManifestWorkName string
+		ginkgo.BeforeEach(func() {
+			manifests = []workapiv1.Manifest{
+				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})),
+				util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})),
+			}
+			// Create another manifestwork with one shared resource.
+			anotherWork = util.NewManifestWork(o.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]})
+		})
+
+		ginkgo.JustBeforeEach(func() {
+			work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
+			appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name)
+
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+
+			anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+			util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue,
+				[]metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval)
+
+			anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, anotherWork.Name)
+		})
+
+		ginkgo.It("shared resource between the manifestworks should be kept when one manifestwork is deleted", func() {
+			// ensure configmap exists and get its uid
+			util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
+			currentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			currentUID := currentConfigMap.UID
+
+			// Ensure that the uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
+			gomega.Eventually(func() error {
+				appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, appliedResource := range appliedManifestWork.Status.AppliedResources {
+					if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) {
+						return nil
+					}
+				}
+
+				return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match")
+			}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+
+			gomega.Eventually(func() error {
+				anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources {
+					if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) {
+						return nil
+					}
+				}
+
+				return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match")
+			}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+
+			// Delete one manifestwork
+			err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+
+			// Ensure the appliedmanifestwork of the deleted manifestwork is removed so it won't try to delete the shared resource
+			gomega.Eventually(func() bool {
+				_, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{})
+				return errors.IsNotFound(err)
+			}, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue())
+
+			// Ensure the configmap is kept and tracked by anotherappliedmanifestwork.
+			gomega.Eventually(func() error {
+				configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				if currentUID != configMap.UID {
+					return fmt.Errorf("UID should be equal")
+				}
+
+				anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{})
+				if err != nil {
+					return err
+				}
+
+				for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources {
+					if appliedResource.Name != "cm1" {
+						return fmt.Errorf("Resource Name should be cm1")
+					}
+
+					if appliedResource.UID != string(currentUID) {
+						return fmt.Errorf("UID should be equal")
+					}
+				}
+
+				return nil
+			}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
+		})
+
+		ginkgo.It("shared resource between the manifestworks should be kept when the shared resource is removed from one manifestwork", func() {
+			// ensure configmap exists and get its uid
+			util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval)
+			currentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{})
+			gomega.Expect(err).ToNot(gomega.HaveOccurred())
+			currentUID := currentConfigMap.UID
+
+			// Ensure that the uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct.
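+			// Checking the UID, not just the name, confirms that both appliedmanifestworks
+			// track the very same configmap object rather than a re-created copy.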
+ gomega.Eventually(func() error { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { + return nil + } + } + + return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { + if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { + return nil + } + } + + return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match") + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Update one manifestwork to remove the shared resource + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + work.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]} + _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Ensure the resource is not tracked by the appliedmanifestwork. + gomega.Eventually(func() bool { + appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return false + } + + for _, appliedResource := range appliedManifestWork.Status.AppliedResources { + if appliedResource.Name == "cm1" { + return false + } + } + + return true + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Ensure the configmap is kept and tracked by anotherappliedmanifestwork + gomega.Eventually(func() error { + configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if currentUID != configMap.UID { + return fmt.Errorf("UID should be equal") + } + + anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) + if err != nil { + return err + } + + for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { + if appliedResource.Name != "cm1" { + return fmt.Errorf("Resource Name should be cm1") + } + + if appliedResource.UID != string(currentUID) { + return fmt.Errorf("UID should be equal") + } + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + }) + + }) + + ginkgo.Context("Delete options", func() { + ginkgo.BeforeEach(func() { + manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), + util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + } + }) + + ginkgo.It("Orphan deletion of the whole manifestwork", func() { + work.Spec.DeleteOption = 
&workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) + + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + }) + + ginkgo.It("Selectively Orphan deletion of the manifestwork", func() { + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "configmaps", + Namespace: o.SpokeClusterName, + Name: "cm1", + }, + }, + }, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) + + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + 
[]metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // One of the resource should be deleted. + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + + // One of the resource should be kept + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.It("Keep the resource when remove it from manifests with orphan deletion option", func() { + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "configmaps", + Namespace: o.SpokeClusterName, + Name: "cm1", + }, + }, + }, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) + + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Remove the resource from the manifests + gomega.Eventually(func() error { + work, err = 
hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.Workload.Manifests = []workapiv1.Manifest{ + util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), + } + _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Sleep 5 second and check the resource should be kept + time.Sleep(5 * time.Second) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + ginkgo.It("Clean the resource when orphan deletion option is removed", func() { + work.Spec.DeleteOption = &workapiv1.DeleteOption{ + PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, + SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ + OrphaningRules: []workapiv1.OrphaningRule{ + { + Group: "", + Resource: "configmaps", + Namespace: o.SpokeClusterName, + Name: "cm1", + }, + }, + }, + } + + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + appliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, work.Name) + + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, + []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) + + // Ensure configmap exists + util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 0 { + return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Remove the delete option + gomega.Eventually(func() error { + work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + work.Spec.DeleteOption = nil + _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Ensure ownership of configmap is updated + gomega.Eventually(func() error { + 
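+				// With the delete option removed, the agent is expected to add the
+				// appliedmanifestwork back as the owner of cm1, so exactly one owner
+				// reference should be present again.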
cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + if len(cm.OwnerReferences) != 1 { + return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // Delete the work + err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + // Wait for deletion of manifest work + gomega.Eventually(func() bool { + _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) + return errors.IsNotFound(err) + }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) + + // All of the resource should be deleted. + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) + gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) + }) + }) +}) diff --git a/test/integration/work_test.go b/test/integration/work_test.go index 67165b80c..4702a755b 100644 --- a/test/integration/work_test.go +++ b/test/integration/work_test.go @@ -277,11 +277,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { // wait for deletion of manifest work gomega.Eventually(func() bool { _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if !errors.IsNotFound(err) { - return false - } - - return true + return errors.IsNotFound(err) }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) // Once manifest work is deleted, all CRs/CRD should have already been deleted too @@ -408,11 +404,7 @@ var _ = ginkgo.Describe("ManifestWork", func() { } sa, _, _ := unstructured.NestedString(u.Object, "spec", "template", "spec", "serviceAccountName") - if "admin" != sa { - return false - } - - return true + return sa == "admin" }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) ginkgo.By("check if LastTransitionTime is updated") @@ -558,421 +550,4 @@ var _ = ginkgo.Describe("ManifestWork", func() { util.AssertAppliedManifestWorkDeleted(appliedManifestWork.Name, spokeWorkClient, eventuallyTimeout, eventuallyInterval) }) }) - - ginkgo.Context("Resource sharing and adoption between manifestworks", func() { - var anotherWork *workapiv1.ManifestWork - var anotherAppliedManifestWorkName string - ginkgo.BeforeEach(func() { - manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), - } - }) - - ginkgo.JustBeforeEach(func() { - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // 
Create another manifestworks with one shared resource. - anotherWork = util.NewManifestWork(o.SpokeClusterName, "sharing-resource-work", []workapiv1.Manifest{manifests[0]}) - anotherWork, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Create(context.Background(), anotherWork, metav1.CreateOptions{}) - anotherAppliedManifestWorkName = fmt.Sprintf("%s-%s", hubHash, anotherWork.Name) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.It("shared resource between the manifestwork should be kept when one manifestwork is deleted", func() { - // Ensure two manifestworks are all applied - util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // ensure configmap exists and get its uid - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - currentUID := curentConfigMap.UID - - // Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct. - gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete one manifestwork - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure the appliedmanifestwork of deleted manifestwork is removed so it won't try to delete shared resource - gomega.Eventually(func() bool { - _, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - return true - } - return false - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // Ensure the configmap is kept and tracked by anotherappliedmanifestwork. 
- gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err != nil { - return err - } - - if currentUID != configMap.UID { - return fmt.Errorf("UID should be equal") - } - - anotherappliedmanifestwork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherappliedmanifestwork.Status.AppliedResources { - if appliedResource.Name != "cm1" { - return fmt.Errorf("Resource Name should be cm1") - } - - if appliedResource.UID != string(currentUID) { - return fmt.Errorf("UID should be equal") - } - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - ginkgo.It("shared resource between the manifestwork should be kept when the shared resource is removed from one manifestwork", func() { - util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkApplied), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(anotherWork.Namespace, anotherWork.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - - // ensure configmap exists and get its uid - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - curentConfigMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - currentUID := curentConfigMap.UID - - // Ensure that uid recorded in the appliedmanifestwork and anotherappliedmanifestwork is correct. 
- gomega.Eventually(func() error { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" && appliedResource.UID == string(currentUID) { - return nil - } - } - - return fmt.Errorf("Resource name or uid in appliedmanifestwork does not match") - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Update one manifestwork to remove the shared resource - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - work.Spec.Workload.Manifests = []workapiv1.Manifest{manifests[1]} - _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure the resource is not tracked by the appliedmanifestwork. - gomega.Eventually(func() bool { - appliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return false - } - - for _, appliedResource := range appliedManifestWork.Status.AppliedResources { - if appliedResource.Name == "cm1" { - return false - } - } - - return true - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // Ensure the configmap is kept and tracked by anotherappliedmanifestwork - gomega.Eventually(func() error { - configMap, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err != nil { - return err - } - - if currentUID != configMap.UID { - return fmt.Errorf("UID should be equal") - } - - anotherAppliedManifestWork, err := spokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), anotherAppliedManifestWorkName, metav1.GetOptions{}) - if err != nil { - return err - } - - for _, appliedResource := range anotherAppliedManifestWork.Status.AppliedResources { - if appliedResource.Name != "cm1" { - return fmt.Errorf("Resource Name should be cm1") - } - - if appliedResource.UID != string(currentUID) { - return fmt.Errorf("UID should be equal") - } - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - }) - - }) - - ginkgo.Context("Delete options", func() { - ginkgo.BeforeEach(func() { - manifests = []workapiv1.Manifest{ - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm1", map[string]string{"a": "b"}, []string{})), - util.ToManifest(util.NewConfigmap(o.SpokeClusterName, "cm2", map[string]string{"c": "d"}, []string{})), - } - }) - - ginkgo.JustBeforeEach(func() { - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, 
string(workapiv1.WorkApplied), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - util.AssertWorkCondition(work.Namespace, work.Name, hubWorkClient, string(workapiv1.WorkAvailable), metav1.ConditionTrue, - []metav1.ConditionStatus{metav1.ConditionTrue, metav1.ConditionTrue}, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("Orphan deletion of the whole manifestwork", func() { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - work.Spec.DeleteOption = &workapiv1.DeleteOption{ - PropagationPolicy: workapiv1.DeletePropagationPolicyTypeOrphan, - } - - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 0 { - return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 0 { - return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete the work - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Wait for deletion of manifest work - gomega.Eventually(func() bool { - _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - }) - - ginkgo.It("Selectively Orphan deletion of the manifestwork", func() { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - work.Spec.DeleteOption = &workapiv1.DeleteOption{ - PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, - SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ - OrphaningRules: []workapiv1.OrphaningRule{ - { - Group: "", - Resource: "configmaps", - Namespace: o.SpokeClusterName, - Name: "cm1", - }, - }, - }, - } - - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, 
eventuallyInterval) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 0 { - return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete the work - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Wait for deletion of manifest work - gomega.Eventually(func() bool { - _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // One of the resource should be deleted. - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) - gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - - // One of the resource should be kept - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - - ginkgo.It("Clean the resource when orphan deletion option is removed", func() { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - work.Spec.DeleteOption = &workapiv1.DeleteOption{ - PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan, - SelectivelyOrphan: &workapiv1.SelectivelyOrphan{ - OrphaningRules: []workapiv1.OrphaningRule{ - { - Group: "", - Resource: "configmaps", - Namespace: o.SpokeClusterName, - Name: "cm1", - }, - }, - }, - } - - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Ensure configmap exists - util.AssertExistenceOfConfigMaps(manifests, spokeKubeClient, eventuallyTimeout, eventuallyInterval) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err != nil { - return err - } - - if len(cm.OwnerReferences) != 0 { - return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Remove the delete option - gomega.Eventually(func() error { - work, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - if err != nil { - return err - } - - work.Spec.DeleteOption = nil - _, err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) - return err - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Ensure ownership of configmap is updated - gomega.Eventually(func() error { - cm, err := spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - if err 
!= nil { - return err - } - - if len(cm.OwnerReferences) != 1 { - return fmt.Errorf("Owner reference are not correctly updated, current ownerrefs are %v", cm.OwnerReferences) - } - - return nil - }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) - - // Delete the work - err = hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Delete(context.Background(), work.Name, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - - // Wait for deletion of manifest work - gomega.Eventually(func() bool { - _, err := hubWorkClient.WorkV1().ManifestWorks(o.SpokeClusterName).Get(context.Background(), work.Name, metav1.GetOptions{}) - return errors.IsNotFound(err) - }, eventuallyTimeout, eventuallyInterval).Should(gomega.BeTrue()) - - // All of the resource should be deleted. - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm2", metav1.GetOptions{}) - gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - _, err = spokeKubeClient.CoreV1().ConfigMaps(o.SpokeClusterName).Get(context.Background(), "cm1", metav1.GetOptions{}) - gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - }) - }) })
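For reference, a minimal sketch of the delete option these tests exercise is shown below. It only uses types and fields that appear in the patch (workapiv1.DeleteOption, SelectivelyOrphan, OrphaningRule); the pre-built `work` object and the "default" namespace are illustrative assumptions, not part of the change.

// Assumes `work` is a *workapiv1.ManifestWork already built (e.g. by util.NewManifestWork).
// Orphan only the configmap "cm1" when the ManifestWork is deleted; everything else is cleaned up.
work.Spec.DeleteOption = &workapiv1.DeleteOption{
	PropagationPolicy: workapiv1.DeletePropagationPolicyTypeSelectivelyOrphan,
	SelectivelyOrphan: &workapiv1.SelectivelyOrphan{
		OrphaningRules: []workapiv1.OrphaningRule{
			{Group: "", Resource: "configmaps", Namespace: "default", Name: "cm1"},
		},
	},
}

// Use workapiv1.DeletePropagationPolicyTypeOrphan instead to leave every applied resource behind on deletion.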