From c7a262c2a0edb070dd7d66fee7b0f7399b094fe0 Mon Sep 17 00:00:00 2001 From: xuezhaojun Date: Thu, 11 Jul 2024 14:50:32 +0800 Subject: [PATCH] Refactor tester to e2e framework. Signed-off-by: xuezhaojun --- test/e2e/addon_lease_test.go | 69 +- test/e2e/addon_test.go | 14 +- test/e2e/addonmanagement_test.go | 208 +++- test/e2e/common.go | 1244 -------------------- test/e2e/e2e_suite_test.go | 91 +- test/e2e/klusterlet_hosted_test.go | 42 +- test/e2e/klusterlet_test.go | 97 +- test/e2e/managedcluster_loopback_test.go | 32 +- test/e2e/managedclustersetbinding_test.go | 22 +- test/e2e/manifestworkreplicaset_test.go | 50 +- test/e2e/placement_test.go | 38 +- test/e2e/registration_taint_update_test.go | 6 +- test/e2e/registration_webhook_test.go | 178 +-- test/e2e/work_webhook_test.go | 29 +- test/e2e/work_workload_test.go | 122 +- test/framework/clusterclient.go | 157 +++ test/framework/clustermanager.go | 34 + test/framework/common.go | 93 ++ test/framework/deployment.go | 22 + test/framework/featuregate.go | 78 ++ test/framework/hub.go | 95 ++ test/framework/klusterlet.go | 257 ++++ test/framework/kubeconfig.go | 94 ++ test/framework/managedcluster.go | 190 +++ test/framework/managedclusteraddon.go | 78 ++ test/framework/manifestwork.go | 26 + test/framework/spoke.go | 35 + 27 files changed, 1758 insertions(+), 1643 deletions(-) delete mode 100644 test/e2e/common.go create mode 100644 test/framework/clusterclient.go create mode 100644 test/framework/clustermanager.go create mode 100644 test/framework/common.go create mode 100644 test/framework/deployment.go create mode 100644 test/framework/featuregate.go create mode 100644 test/framework/hub.go create mode 100644 test/framework/klusterlet.go create mode 100644 test/framework/kubeconfig.go create mode 100644 test/framework/managedcluster.go create mode 100644 test/framework/managedclusteraddon.go create mode 100644 test/framework/manifestwork.go create mode 100644 test/framework/spoke.go diff --git a/test/e2e/addon_lease_test.go b/test/e2e/addon_lease_test.go index b57c4bf01..c7e124703 100644 --- a/test/e2e/addon_lease_test.go +++ b/test/e2e/addon_lease_test.go @@ -17,6 +17,8 @@ import ( clusterv1 "open-cluster-management.io/api/cluster/v1" operatorapiv1 "open-cluster-management.io/api/operator/v1" + + "open-cluster-management.io/ocm/test/framework" ) const availableLabelValue = "available" @@ -28,12 +30,12 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // create an addon on created managed cluster addOnName = fmt.Sprintf("addon-%s", rand.String(6)) ginkgo.By(fmt.Sprintf("Creating managed cluster addon %q", addOnName)) - err := t.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName) + err := hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // create addon installation namespace ginkgo.By(fmt.Sprintf("Creating managed cluster addon installation namespace %q", addOnName)) - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ + _, err = spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: addOnName, }, @@ -43,13 +45,13 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( ginkgo.AfterEach(func() { ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName)) - err := t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, 
metav1.DeleteOptions{}) + err := spoke.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should keep addon status to available", func() { ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName)) - _, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ + _, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: addOnName, Namespace: addOnName, @@ -61,7 +63,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } @@ -73,7 +75,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // check if the cluster has a label for addon with expected value gomega.Eventually(func() bool { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) if err != nil { return false } @@ -87,7 +89,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( ginkgo.It("Should update addon status to unavailable if addon stops to update its lease", func() { ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName)) - _, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ + _, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: addOnName, Namespace: addOnName, @@ -99,7 +101,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } @@ -111,7 +113,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // check if the cluster has a label for addon with expected value gomega.Eventually(func() bool { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) if err != nil { return false } @@ -123,14 +125,14 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( }).Should(gomega.BeTrue()) ginkgo.By(fmt.Sprintf("Updating lease %q with a past time", addOnName)) - lease, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + lease, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Get(context.TODO(), 
addOnName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now().Add(-10 * time.Minute)} - _, err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Update(context.TODO(), lease, metav1.UpdateOptions{}) + _, err = spoke.KubeClient.CoordinationV1().Leases(addOnName).Update(context.TODO(), lease, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } @@ -142,7 +144,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // check if the cluster has a label for addon with expected value gomega.Eventually(func() bool { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) if err != nil { return false } @@ -156,7 +158,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( ginkgo.It("Should update addon status to unknown if there is no lease for this addon", func() { ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName)) - _, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ + _, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: addOnName, Namespace: addOnName, @@ -168,7 +170,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } @@ -180,7 +182,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // check if the cluster has a label for addon with expected value gomega.Eventually(func() bool { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) if err != nil { return false } @@ -192,11 +194,11 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( }).Should(gomega.BeTrue()) ginkgo.By(fmt.Sprintf("Deleting lease %q", addOnName)) - err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + err = spoke.KubeClient.CoordinationV1().Leases(addOnName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := 
hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } @@ -208,7 +210,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // check if the cluster has a label for addon with expected value gomega.Eventually(func() bool { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) if err != nil { return false } @@ -227,18 +229,17 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6)) clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6)) agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6)) - _, err := t.CreateApprovedKlusterlet( - klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode)) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + framework.CreateAndApproveKlusterlet( + hub, spoke, + klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode), bootstrapHubKubeConfigSecret, images) // create an addon on created managed cluster addOnName = fmt.Sprintf("addon-%s", rand.String(6)) ginkgo.By(fmt.Sprintf("Creating managed cluster addon %q", addOnName)) - err = t.CreateManagedClusterAddOn(clusterName, addOnName, addOnName) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CreateManagedClusterAddOn(clusterName, addOnName, addOnName)).ToNot(gomega.HaveOccurred()) // create addon installation namespace ginkgo.By(fmt.Sprintf("Creating managed cluster addon installation namespace %q", addOnName)) - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ + _, err := spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: addOnName, }, @@ -248,15 +249,15 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( ginkgo.AfterEach(func() { ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName)) - err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + err := hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName)) - gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil()) + framework.CleanKlusterletRelatedResources(hub, spoke, klusterletName, clusterName) }) ginkgo.It("Should update addon status to unknown if managed cluster stops to update its lease", func() { ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName)) - _, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ + _, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{ ObjectMeta: metav1.ObjectMeta{ Name: addOnName, Namespace: addOnName, @@ -268,7 +269,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := 
t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } @@ -284,11 +285,11 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // delete registration agent to stop agent update its status ginkgo.By("Stoping klusterlet") - err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) + err = spoke.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - _, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) + _, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) if errors.IsNotFound(err) { klog.Infof("klusterlet %s deleted successfully", klusterletName) return nil @@ -302,7 +303,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( // for speeding up test, update managed cluster status to unknown manually ginkgo.By(fmt.Sprintf("Updating managed cluster %s status to unknown", clusterName)) - found, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + found, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) found.Status = clusterv1.ManagedClusterStatus{ Conditions: []metav1.Condition{ @@ -315,11 +316,11 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func( }, }, } - _, err = t.ClusterClient.ClusterV1().ManagedClusters().UpdateStatus(context.TODO(), found, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().UpdateStatus(context.TODO(), found, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e/addon_test.go b/test/e2e/addon_test.go index 6fbe394de..fdd07e052 100644 --- a/test/e2e/addon_test.go +++ b/test/e2e/addon_test.go @@ -17,37 +17,37 @@ var _ = Describe("Manage the managed cluster addons", Label("addon"), func() { }) AfterEach(func() { - err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) }) It("Create one managed cluster addon and make sure it is available", func() { By(fmt.Sprintf("create the addon %v on the managed cluster namespace %v", addOnName, universalClusterName)) - err := t.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName) + err := hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("create the addon lease %v on addon install namespace %v", addOnName, addOnName)) - err = 
t.CreateManagedClusterAddOnLease(addOnName, addOnName) + err = hub.CreateManagedClusterAddOnLease(addOnName, addOnName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("wait the addon %v available condition to be true", addOnName)) Eventually(func() error { - return t.CheckManagedClusterAddOnStatus(universalClusterName, addOnName) + return hub.CheckManagedClusterAddOnStatus(universalClusterName, addOnName) }).Should(Succeed()) }) It("Create one managed cluster addon and make sure it is available in Hosted mode", func() { By(fmt.Sprintf("create the addon %v on the managed cluster namespace %v", addOnName, universalClusterName)) - err := t.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName) + err := hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("create the addon lease %v on addon install namespace %v", addOnName, addOnName)) - err = t.CreateManagedClusterAddOnLease(addOnName, addOnName) + err = hub.CreateManagedClusterAddOnLease(addOnName, addOnName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("wait the addon %v available condition to be true", addOnName)) Eventually(func() error { - return t.CheckManagedClusterAddOnStatus(universalClusterName, addOnName) + return hub.CheckManagedClusterAddOnStatus(universalClusterName, addOnName) }).Should(Succeed()) }) }) diff --git a/test/e2e/addonmanagement_test.go b/test/e2e/addonmanagement_test.go index 3806be365..95b876b72 100644 --- a/test/e2e/addonmanagement_test.go +++ b/test/e2e/addonmanagement_test.go @@ -2,17 +2,24 @@ package e2e import ( "context" + "embed" "encoding/json" "fmt" ginkgo "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + "github.com/valyala/fasttemplate" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/dynamic" "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" "k8s.io/klog/v2" @@ -66,15 +73,17 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.BeforeEach(func() { ginkgo.By("create addon custom sign secret") - err := copySignerSecret(context.TODO(), t.HubKubeClient, "open-cluster-management-hub", + err := copySignerSecret(context.TODO(), hub.KubeClient, "open-cluster-management-hub", "signer-secret", templateagent.AddonManagerNamespace(), customSignerSecretName) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // the addon manager deployment should be running - gomega.Eventually(t.CheckHubReady).Should(gomega.Succeed()) + gomega.Eventually(func() error { + return hub.CheckHubReady() + }).Should(gomega.Succeed()) ginkgo.By(fmt.Sprintf("create addon template resources for cluster %v", universalClusterName)) - err = createResourcesFromYamlFiles(context.Background(), t.HubDynamicClient, t.hubRestMapper, s, + err = createResourcesFromYamlFiles(context.Background(), hub.DynamicClient, hub.RestMapper, s, defaultAddonTemplateReaderManifestsFunc(manifests.AddonManifestFiles, map[string]interface{}{ "Namespace": universalClusterName, "AddonInstallNamespace": addonInstallNamespace, @@ -87,7 +96,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, 
gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("create the addon %v on the managed cluster namespace %v", addOnName, universalClusterName)) - err = t.CreateManagedClusterAddOn(universalClusterName, addOnName, "test-ns") // the install namespace will be ignored + err = hub.CreateManagedClusterAddOn(universalClusterName, addOnName, "test-ns") // the install namespace will be ignored if err != nil { klog.Errorf("failed to create managed cluster addon %v on the managed cluster namespace %v: %v", addOnName, universalClusterName, err) gomega.Expect(errors.IsAlreadyExists(err)).To(gomega.BeTrue()) @@ -95,20 +104,20 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By(fmt.Sprintf("wait the addon %v/%v available condition to be true", universalClusterName, addOnName)) gomega.Eventually(func() error { - return t.CheckManagedClusterAddOnStatus(universalClusterName, addOnName) + return hub.CheckManagedClusterAddOnStatus(universalClusterName, addOnName) }).Should(gomega.Succeed()) }) ginkgo.AfterEach(func() { ginkgo.By(fmt.Sprintf("delete the addon %v on the managed cluster namespace %v", addOnName, universalClusterName)) - err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete( + err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete( context.TODO(), addOnName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { ginkgo.Fail(fmt.Sprintf("failed to delete managed cluster addon %v on cluster %v: %v", addOnName, universalClusterName, err)) } gomega.Eventually(func() error { - _, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( + _, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { @@ -121,7 +130,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }).ShouldNot(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("delete addon template resources for cluster %v", universalClusterName)) - err = deleteResourcesFromYamlFiles(context.Background(), t.HubDynamicClient, t.hubRestMapper, s, + err = deleteResourcesFromYamlFiles(context.Background(), hub.DynamicClient, hub.RestMapper, s, defaultAddonTemplateReaderManifestsFunc(manifests.AddonManifestFiles, map[string]interface{}{ "Namespace": universalClusterName, "AddonInstallNamespace": addonInstallNamespace, @@ -134,7 +143,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("delete addon custom sign secret") - err = t.HubKubeClient.CoreV1().Secrets(templateagent.AddonManagerNamespace()).Delete(context.TODO(), + err = hub.KubeClient.CoreV1().Secrets(templateagent.AddonManagerNamespace()).Delete(context.TODO(), customSignerSecretName, metav1.DeleteOptions{}) if err != nil && !errors.IsNotFound(err) { ginkgo.Fail(fmt.Sprintf("failed to delete custom signer secret %v/%v: %v", @@ -144,7 +153,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, // delete all CSR created for the addon on the hub cluster, otherwise if it reches the limit number 10, the // other tests will fail gomega.Eventually(func() error { - csrs, err := t.HubKubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), + csrs, err := hub.KubeClient.CertificatesV1().CertificateSigningRequests().List(context.TODO(), metav1.ListOptions{ 
LabelSelector: fmt.Sprintf("%s=%s,%s=%s", addonapiv1alpha1.AddonLabelKey, addOnName, clusterv1.ClusterNameLabelKey, universalClusterName), @@ -154,7 +163,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, } for _, csr := range csrs.Items { - err = t.HubKubeClient.CertificatesV1().CertificateSigningRequests().Delete(context.TODO(), + err = hub.KubeClient.CertificatesV1().CertificateSigningRequests().Delete(context.TODO(), csr.Name, metav1.DeleteOptions{}) if err != nil { return err @@ -168,14 +177,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.It("Template type addon should be functioning", func() { ginkgo.By("Check hub kubeconfig secret is created") gomega.Eventually(func() error { - _, err := t.HubKubeClient.CoreV1().Secrets(addonInstallNamespace).Get(context.TODO(), + _, err := hub.KubeClient.CoreV1().Secrets(addonInstallNamespace).Get(context.TODO(), templateagent.HubKubeconfigSecretName(addOnName), metav1.GetOptions{}) return err }).Should(gomega.Succeed()) ginkgo.By("Check custom signer secret is created") gomega.Eventually(func() error { - _, err := t.HubKubeClient.CoreV1().Secrets(addonInstallNamespace).Get(context.TODO(), + _, err := hub.KubeClient.CoreV1().Secrets(addonInstallNamespace).Get(context.TODO(), templateagent.CustomSignedSecretName(addOnName, customSignerName), metav1.GetOptions{}) return err }).Should(gomega.Succeed()) @@ -192,12 +201,12 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }, } - _, err := t.HubKubeClient.CoreV1().ConfigMaps(universalClusterName).Create( + _, err := hub.KubeClient.CoreV1().ConfigMaps(universalClusterName).Create( context.Background(), configmap, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - copyiedConfig, err := t.SpokeKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get( + copyiedConfig, err := spoke.KubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get( context.Background(), configmap.Name, metav1.GetOptions{}) if err != nil { return err @@ -210,7 +219,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("Make sure manifestwork config is configured") - manifestWork, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), + manifestWork, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), fmt.Sprintf("addon-%s-deploy-0", addOnName), metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) foundDeploymentConfig := false @@ -242,13 +251,13 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, } ginkgo.By(fmt.Sprintf("delete the addon %v on the managed cluster namespace %v", addOnName, universalClusterName)) - err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete( + err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete( context.TODO(), addOnName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("The pre-delete job should clean up the configmap after the addon is deleted") gomega.Eventually(func() error { - _, err := t.SpokeKubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get( + _, err := spoke.KubeClient.CoreV1().ConfigMaps(addonInstallNamespace).Get( context.Background(), configmap.Name, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { @@ -261,7 
+270,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }).ShouldNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - _, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( + _, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( context.TODO(), addOnName, metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { @@ -275,7 +284,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("The pre-delete job should be deleted ") gomega.Eventually(func() error { - _, err := t.SpokeKubeClient.BatchV1().Jobs(addonInstallNamespace).Get( + _, err := spoke.KubeClient.BatchV1().Jobs(addonInstallNamespace).Get( context.Background(), "hello-template-cleanup-configmap", metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { @@ -303,7 +312,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, registriesJson, err := json.Marshal(overrideRegistries) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get( + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get( context.Background(), universalClusterName, metav1.GetOptions{}) if err != nil { return err @@ -318,7 +327,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, annotations[clusterv1.ClusterImageRegistriesAnnotationKey] = string(registriesJson) newCluster.Annotations = annotations - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update( + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update( context.Background(), newCluster, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) @@ -330,7 +339,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Add the configs to ManagedClusterAddOn") gomega.Eventually(func() error { - addon, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( + addon, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( context.Background(), addOnName, metav1.GetOptions{}) if err != nil { return err @@ -348,7 +357,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }, }, } - _, err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( + _, err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( context.Background(), newAddon, metav1.UpdateOptions{}) if err != nil { return err @@ -358,7 +367,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Make sure addon is configured") gomega.Eventually(func() error { - agentDeploy, err := t.SpokeKubeClient.AppsV1().Deployments(addonInstallNamespace).Get( + agentDeploy, err := spoke.KubeClient.AppsV1().Deployments(addonInstallNamespace).Get( context.Background(), "hello-template-agent", metav1.GetOptions{}) if err != nil { return err @@ -378,7 +387,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Restore the managed cluster annotation") gomega.Eventually(func() error { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get( + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get( context.Background(), universalClusterName, metav1.GetOptions{}) if err != nil { return err @@ -386,7 +395,7 @@ var _ = 
ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, newCluster := cluster.DeepCopy() delete(newCluster.Annotations, clusterv1.ClusterImageRegistriesAnnotationKey) - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update( + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update( context.Background(), newCluster, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) @@ -395,14 +404,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, // but it is needed by the pre-delete job ginkgo.By("Restore the configs to ManagedClusterAddOn") gomega.Eventually(func() error { - addon, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( + addon, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( context.Background(), addOnName, metav1.GetOptions{}) if err != nil { return err } newAddon := addon.DeepCopy() newAddon.Spec.Configs = []addonapiv1alpha1.AddOnConfig{} - _, err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( + _, err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( context.Background(), newAddon, metav1.UpdateOptions{}) if err != nil { return err @@ -412,7 +421,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Make sure addon config is restored") gomega.Eventually(func() error { - agentDeploy, err := t.SpokeKubeClient.AppsV1().Deployments(addonInstallNamespace).Get( + agentDeploy, err := spoke.KubeClient.AppsV1().Deployments(addonInstallNamespace).Get( context.Background(), "hello-template-agent", metav1.GetOptions{}) if err != nil { return err @@ -439,7 +448,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Add the configs to ManagedClusterAddOn") gomega.Eventually(func() error { - addon, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( + addon, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( context.Background(), addOnName, metav1.GetOptions{}) if err != nil { return err @@ -457,7 +466,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }, }, } - _, err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( + _, err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( context.Background(), newAddon, metav1.UpdateOptions{}) if err != nil { return err @@ -467,7 +476,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Make sure addon is configured") gomega.Eventually(func() error { - agentDeploy, err := t.SpokeKubeClient.AppsV1().Deployments(addonInstallNamespace).Get( + agentDeploy, err := spoke.KubeClient.AppsV1().Deployments(addonInstallNamespace).Get( context.Background(), "hello-template-agent", metav1.GetOptions{}) if err != nil { return err @@ -493,7 +502,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, Name: "another-addon-namespace", }, } - _, err := t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), overrideNamespace, metav1.CreateOptions{}) + _, err := spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), overrideNamespace, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { return prepareInstallNamespace(universalClusterName, overrideNamespace.Name) @@ -501,7 
+510,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Add the configs to ManagedClusterAddOn") gomega.Eventually(func() error { - addon, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( + addon, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get( context.Background(), addOnName, metav1.GetOptions{}) if err != nil { return err @@ -519,7 +528,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }, }, } - _, err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( + _, err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Update( context.Background(), newAddon, metav1.UpdateOptions{}) if err != nil { return err @@ -529,7 +538,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, ginkgo.By("Make sure addon is configured") gomega.Eventually(func() error { - _, err := t.SpokeKubeClient.AppsV1().Deployments(overrideNamespace.Name).Get( + _, err := spoke.KubeClient.AppsV1().Deployments(overrideNamespace.Name).Get( context.Background(), "hello-template-agent", metav1.GetOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) @@ -543,7 +552,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, registriesJson, err := json.Marshal(overrideRegistries) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get( + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get( context.Background(), universalClusterName, metav1.GetOptions{}) if err != nil { return err @@ -558,14 +567,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, annotations[clusterv1.ClusterImageRegistriesAnnotationKey] = string(registriesJson) newCluster.Annotations = annotations - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update( + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update( context.Background(), newCluster, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("Make sure addon is configured") gomega.Eventually(func() error { - agentDeploy, err := t.SpokeKubeClient.AppsV1().Deployments(addonInstallNamespace).Get( + agentDeploy, err := spoke.KubeClient.AppsV1().Deployments(addonInstallNamespace).Get( context.Background(), "hello-template-agent", metav1.GetOptions{}) if err != nil { return err @@ -587,7 +596,7 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, // but it is needed by the pre-delete job ginkgo.By("Restore the managed cluster annotation") gomega.Eventually(func() error { - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get( + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get( context.Background(), universalClusterName, metav1.GetOptions{}) if err != nil { return err @@ -595,14 +604,14 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, newCluster := cluster.DeepCopy() delete(newCluster.Annotations, clusterv1.ClusterImageRegistriesAnnotationKey) - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update( + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update( context.Background(), newCluster, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("Make sure addon config is restored") gomega.Eventually(func() error { - agentDeploy, 
err := t.SpokeKubeClient.AppsV1().Deployments(addonInstallNamespace).Get( + agentDeploy, err := spoke.KubeClient.AppsV1().Deployments(addonInstallNamespace).Get( context.Background(), "hello-template-agent", metav1.GetOptions{}) if err != nil { return err @@ -624,10 +633,10 @@ var _ = ginkgo.Describe("Enable addon management feature gate", ginkgo.Ordered, }) func prepareInstallNamespace(namespace, installNamespace string) error { - _, err := t.AddOnClinet.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( + _, err := hub.AddonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( context.Background(), namespaceOverrideConfigName, metav1.GetOptions{}) if errors.IsNotFound(err) { - if _, err := t.AddOnClinet.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Create( + if _, err := hub.AddonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Create( context.Background(), &addonapiv1alpha1.AddOnDeploymentConfig{ ObjectMeta: metav1.ObjectMeta{ @@ -650,10 +659,10 @@ func prepareInstallNamespace(namespace, installNamespace string) error { } func prepareImageOverrideAddOnDeploymentConfig(namespace, installNamespace string) error { - _, err := t.AddOnClinet.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( + _, err := hub.AddonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( context.Background(), imageOverrideDeploymentConfigName, metav1.GetOptions{}) if errors.IsNotFound(err) { - if _, err := t.AddOnClinet.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Create( + if _, err := hub.AddonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Create( context.Background(), &addonapiv1alpha1.AddOnDeploymentConfig{ ObjectMeta: metav1.ObjectMeta{ @@ -677,10 +686,10 @@ func prepareImageOverrideAddOnDeploymentConfig(namespace, installNamespace strin } func prepareNodePlacementAddOnDeploymentConfig(namespace, installNamespace string) error { - _, err := t.AddOnClinet.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( + _, err := hub.AddonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Get( context.Background(), nodePlacementDeploymentConfigName, metav1.GetOptions{}) if errors.IsNotFound(err) { - if _, err := t.AddOnClinet.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Create( + if _, err := hub.AddonClient.AddonV1alpha1().AddOnDeploymentConfigs(namespace).Create( context.Background(), &addonapiv1alpha1.AddOnDeploymentConfig{ ObjectMeta: metav1.ObjectMeta{ @@ -727,3 +736,104 @@ func copySignerSecret(ctx context.Context, kubeClient kubernetes.Interface, srcN } return nil } + +func createResourcesFromYamlFiles( + ctx context.Context, + dynamicClient dynamic.Interface, + restMapper meta.RESTMapper, + scheme *runtime.Scheme, + manifests func(name string) ([]byte, error), + resourceFiles []string) error { + + var appliedErrs []error + + decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer() + for _, fileName := range resourceFiles { + objData, err := manifests(fileName) + if err != nil { + return err + } + required := unstructured.Unstructured{} + _, gvk, err := decoder.Decode(objData, nil, &required) + if err != nil { + return err + } + + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + _, err = dynamicClient.Resource(mapping.Resource).Namespace(required.GetNamespace()).Create( + ctx, &required, metav1.CreateOptions{}) + if errors.IsAlreadyExists(err) { + continue + } + if err != nil { + fmt.Printf("Error creating %q (%T): %v\n", fileName, mapping.Resource, err) + appliedErrs = 
append(appliedErrs, fmt.Errorf("%q (%T): %v", fileName, mapping.Resource, err)) + } + } + + return utilerrors.NewAggregate(appliedErrs) +} + +func deleteResourcesFromYamlFiles( + ctx context.Context, + dynamicClient dynamic.Interface, + restMapper meta.RESTMapper, + scheme *runtime.Scheme, + manifests func(name string) ([]byte, error), + resourceFiles []string) error { + + var appliedErrs []error + + decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer() + for _, fileName := range resourceFiles { + objData, err := manifests(fileName) + if err != nil { + return err + } + required := unstructured.Unstructured{} + _, gvk, err := decoder.Decode(objData, nil, &required) + if err != nil { + return err + } + + mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) + if err != nil { + return err + } + + err = dynamicClient.Resource(mapping.Resource).Namespace(required.GetNamespace()).Delete( + ctx, required.GetName(), metav1.DeleteOptions{}) + if errors.IsNotFound(err) { + continue + } + if err != nil { + fmt.Printf("Error deleting %q (%T): %v\n", fileName, mapping.Resource, err) + appliedErrs = append(appliedErrs, fmt.Errorf("%q (%T): %v", fileName, mapping.Resource, err)) + } + } + + return utilerrors.NewAggregate(appliedErrs) +} + +// defaultAddonTemplateReaderManifestsFunc returns a function that reads the addon template from the embed.FS, +// and replaces the placeholder in format of "<< placeholder >>" with the value in configValues. +func defaultAddonTemplateReaderManifestsFunc( + fs embed.FS, + configValues map[string]interface{}, +) func(string) ([]byte, error) { + + return func(fileName string) ([]byte, error) { + template, err := fs.ReadFile(fileName) + if err != nil { + return nil, err + } + + t := fasttemplate.New(string(template), "<< ", " >>") + objData := t.ExecuteString(configValues) + return []byte(objData), nil + } +} diff --git a/test/e2e/common.go b/test/e2e/common.go deleted file mode 100644 index 4f69751d6..000000000 --- a/test/e2e/common.go +++ /dev/null @@ -1,1244 +0,0 @@ -package e2e - -import ( - "bytes" - "context" - "embed" - "fmt" - "io" - "os" - "strings" - "time" - - "github.com/onsi/gomega" - "github.com/valyala/fasttemplate" - authv1 "k8s.io/api/authentication/v1" - certificatesv1 "k8s.io/api/certificates/v1" - coordv1 "k8s.io/api/coordination/v1" - corev1 "k8s.io/api/core/v1" - rbacv1 "k8s.io/api/rbac/v1" - apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog/v2" - "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client/apiutil" - - addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" - addonclient "open-cluster-management.io/api/client/addon/clientset/versioned" - clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" - operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" - workv1client "open-cluster-management.io/api/client/work/clientset/versioned" - clusterv1 
"open-cluster-management.io/api/cluster/v1" - ocmfeature "open-cluster-management.io/api/feature" - operatorapiv1 "open-cluster-management.io/api/operator/v1" - workapiv1 "open-cluster-management.io/api/work/v1" - - "open-cluster-management.io/ocm/pkg/operator/helpers" - "open-cluster-management.io/ocm/test/integration/util" -) - -type Tester struct { - hubKubeConfigPath string - spokeKubeConfigPath string - HubKubeClient kubernetes.Interface - SpokeKubeClient kubernetes.Interface - HubAPIExtensionClient apiextensionsclient.Interface - HubClusterCfg *rest.Config - SpokeClusterCfg *rest.Config - OperatorClient operatorclient.Interface - ClusterClient clusterclient.Interface - HubWorkClient workv1client.Interface - SpokeWorkClient workv1client.Interface - AddOnClinet addonclient.Interface - hubRestMapper meta.RESTMapper - HubDynamicClient dynamic.Interface - SpokeDynamicClient dynamic.Interface - bootstrapHubSecret *corev1.Secret - clusterManagerName string - clusterManagerNamespace string - operatorNamespace string - klusterletOperator string - registrationImage string - workImage string - singletonImage string -} - -// kubeconfigPath is the path of kubeconfig file, will be get from env "KUBECONFIG" by default. -// bootstrapHubSecret is the bootstrap hub kubeconfig secret, and the format is "namespace/secretName". -// Default of bootstrapHubSecret is helpers.KlusterletDefaultNamespace/helpers.BootstrapHubKubeConfig. -func NewTester(hubKubeConfigPath, spokeKubeConfigPath, registrationImage, workImage, singletonImage string) *Tester { - var tester = Tester{ - hubKubeConfigPath: hubKubeConfigPath, - spokeKubeConfigPath: spokeKubeConfigPath, - clusterManagerName: "cluster-manager", // same name as deploy/cluster-manager/config/samples - clusterManagerNamespace: helpers.ClusterManagerDefaultNamespace, - operatorNamespace: "open-cluster-management", - klusterletOperator: "klusterlet", - registrationImage: registrationImage, - workImage: workImage, - singletonImage: singletonImage, - } - - return &tester -} - -func (t *Tester) Init() error { - var err error - - if t.hubKubeConfigPath == "" { - t.hubKubeConfigPath = os.Getenv("KUBECONFIG") - } - if t.spokeKubeConfigPath == "" { - t.spokeKubeConfigPath = os.Getenv("KUBECONFIG") - } - - if t.HubClusterCfg, err = clientcmd.BuildConfigFromFlags("", t.hubKubeConfigPath); err != nil { - klog.Errorf("failed to get HubClusterCfg from path %v . %v", t.hubKubeConfigPath, err) - return err - } - if t.SpokeClusterCfg, err = clientcmd.BuildConfigFromFlags("", t.spokeKubeConfigPath); err != nil { - klog.Errorf("failed to get SpokeClusterCfg from path %v . %v", t.spokeKubeConfigPath, err) - return err - } - - if t.HubKubeClient, err = kubernetes.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get KubeClient. %v", err) - return err - } - if t.SpokeKubeClient, err = kubernetes.NewForConfig(t.SpokeClusterCfg); err != nil { - klog.Errorf("failed to get KubeClient. %v", err) - return err - } - - hubHttpClient, err := rest.HTTPClientFor(t.HubClusterCfg) - if err != nil { - return err - } - t.hubRestMapper, err = apiutil.NewDynamicRESTMapper(t.HubClusterCfg, hubHttpClient) - if err != nil { - return err - } - - if t.HubDynamicClient, err = dynamic.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get DynamicClient. %v", err) - return err - } - - if t.SpokeDynamicClient, err = dynamic.NewForConfig(t.SpokeClusterCfg); err != nil { - klog.Errorf("failed to get DynamicClient. 
%v", err) - return err - } - - if t.HubAPIExtensionClient, err = apiextensionsclient.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get HubApiExtensionClient. %v", err) - return err - } - if t.OperatorClient, err = operatorclient.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get OperatorClient. %v", err) - return err - } - if t.ClusterClient, err = clusterclient.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get ClusterClient. %v", err) - return err - } - if t.HubWorkClient, err = workv1client.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get WorkClient. %v", err) - return err - } - if t.SpokeWorkClient, err = workv1client.NewForConfig(t.SpokeClusterCfg); err != nil { - klog.Errorf("failed to get WorkClient. %v", err) - return err - } - if t.AddOnClinet, err = addonclient.NewForConfig(t.HubClusterCfg); err != nil { - klog.Errorf("failed to get AddOnClinet. %v", err) - return err - } - - return nil -} - -func (t *Tester) SetOperatorNamespace(ns string) *Tester { - t.operatorNamespace = ns - return t -} - -func (t *Tester) SetBootstrapHubSecret(bootstrapHubSecret string) error { - var err error - var bootstrapHubSecretName = helpers.BootstrapHubKubeConfig - var bootstrapHubSecretNamespace = helpers.KlusterletDefaultNamespace - if bootstrapHubSecret != "" { - bootstrapHubSecretNamespace, bootstrapHubSecretName, err = cache.SplitMetaNamespaceKey(bootstrapHubSecret) - if err != nil { - klog.Errorf("the format of bootstrapHubSecret %v is invalid. %v", bootstrapHubSecret, err) - return err - } - } - if t.bootstrapHubSecret, err = t.SpokeKubeClient.CoreV1().Secrets(bootstrapHubSecretNamespace). - Get(context.TODO(), bootstrapHubSecretName, metav1.GetOptions{}); err != nil { - klog.Errorf("failed to get bootstrapHubSecret %v in ns %v. %v", bootstrapHubSecretName, - bootstrapHubSecretNamespace, err) - return err - } - t.bootstrapHubSecret.ObjectMeta.ResourceVersion = "" - t.bootstrapHubSecret.ObjectMeta.Namespace = "" - return nil -} - -func (t *Tester) CreateKlusterlet(name, clusterName, klusterletNamespace string, mode operatorapiv1.InstallMode) (*operatorapiv1.Klusterlet, error) { - if name == "" { - return nil, fmt.Errorf("the name should not be null") - } - if klusterletNamespace == "" { - klusterletNamespace = helpers.KlusterletDefaultNamespace - } - - var klusterlet = &operatorapiv1.Klusterlet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: operatorapiv1.KlusterletSpec{ - RegistrationImagePullSpec: t.registrationImage, - WorkImagePullSpec: t.workImage, - ImagePullSpec: t.singletonImage, - ExternalServerURLs: []operatorapiv1.ServerURL{ - { - URL: "https://localhost", - }, - }, - ClusterName: clusterName, - Namespace: klusterletNamespace, - DeployOption: operatorapiv1.KlusterletDeployOption{ - Mode: mode, - }, - }, - } - - agentNamespace := helpers.AgentNamespace(klusterlet) - klog.Infof("klusterlet: %s/%s, \t mode: %v, \t agent namespace: %s", klusterlet.Name, klusterlet.Namespace, mode, agentNamespace) - - // create agentNamespace - namespace := &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: agentNamespace, - Annotations: map[string]string{ - "workload.openshift.io/allowed": "management", - }, - }, - } - if _, err := t.SpokeKubeClient.CoreV1().Namespaces().Get(context.TODO(), agentNamespace, metav1.GetOptions{}); err != nil { - if !errors.IsNotFound(err) { - klog.Errorf("failed to get ns %v. 
%v", agentNamespace, err) - return nil, err - } - - if _, err := t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), - namespace, metav1.CreateOptions{}); err != nil { - klog.Errorf("failed to create ns %v. %v", namespace, err) - return nil, err - } - } - - // create bootstrap-hub-kubeconfig secret - secret := t.bootstrapHubSecret.DeepCopy() - if _, err := t.SpokeKubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}); err != nil { - if !errors.IsNotFound(err) { - klog.Errorf("failed to get secret %v in ns %v. %v", secret.Name, agentNamespace, err) - return nil, err - } - if _, err = t.SpokeKubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { - klog.Errorf("failed to create secret %v in ns %v. %v", secret, agentNamespace, err) - return nil, err - } - } - - if helpers.IsHosted(mode) { - // create external-managed-kubeconfig, will use the same cluster to simulate the Hosted mode. - secret.Namespace = agentNamespace - secret.Name = helpers.ExternalManagedKubeConfig - if _, err := t.HubKubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}); err != nil { - if !errors.IsNotFound(err) { - klog.Errorf("failed to get secret %v in ns %v. %v", secret.Name, agentNamespace, err) - return nil, err - } - if _, err = t.HubKubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { - klog.Errorf("failed to create secret %v in ns %v. %v", secret, agentNamespace, err) - return nil, err - } - } - } - - // create klusterlet CR - realKlusterlet, err := t.OperatorClient.OperatorV1().Klusterlets().Create(context.TODO(), - klusterlet, metav1.CreateOptions{}) - if err != nil && !errors.IsAlreadyExists(err) { - klog.Errorf("failed to create klusterlet %v . 
%v", klusterlet.Name, err) - return nil, err - } - - return realKlusterlet, nil -} - -func (t *Tester) CreateApprovedKlusterlet(name, clusterName, klusterletNamespace string, mode operatorapiv1.InstallMode) (*operatorapiv1.Klusterlet, error) { - klusterlet, err := t.CreateKlusterlet(name, clusterName, klusterletNamespace, mode) - if err != nil { - return nil, err - } - - gomega.Eventually(func() error { - _, err = t.GetCreatedManagedCluster(clusterName) - return err - }).Should(gomega.Succeed()) - - gomega.Eventually(func() error { - return t.ApproveCSR(clusterName) - }).Should(gomega.Succeed()) - - gomega.Eventually(func() error { - return t.AcceptsClient(clusterName) - }).Should(gomega.Succeed()) - - gomega.Eventually(func() error { - return t.CheckManagedClusterStatus(clusterName) - }).Should(gomega.Succeed()) - - return klusterlet, nil -} - -func (t *Tester) CreatePureHostedKlusterlet(name, clusterName string) (*operatorapiv1.Klusterlet, error) { - if name == "" { - return nil, fmt.Errorf("the name should not be null") - } - - var klusterlet = &operatorapiv1.Klusterlet{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: operatorapiv1.KlusterletSpec{ - RegistrationImagePullSpec: "quay.io/open-cluster-management/registration:latest", - WorkImagePullSpec: "quay.io/open-cluster-management/work:latest", - ExternalServerURLs: []operatorapiv1.ServerURL{ - { - URL: "https://localhost", - }, - }, - ClusterName: clusterName, - DeployOption: operatorapiv1.KlusterletDeployOption{ - Mode: operatorapiv1.InstallModeHosted, - }, - }, - } - - // create klusterlet CR - realKlusterlet, err := t.OperatorClient.OperatorV1().Klusterlets().Create(context.TODO(), - klusterlet, metav1.CreateOptions{}) - if err != nil { - klog.Errorf("failed to create klusterlet %v . 
%v", klusterlet.Name, err) - return nil, err - } - - return realKlusterlet, nil -} - -func (t *Tester) GetCreatedManagedCluster(clusterName string) (*clusterv1.ManagedCluster, error) { - if clusterName == "" { - return nil, fmt.Errorf("the name of managedcluster should not be null") - } - - cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - - return cluster, nil -} - -func (t *Tester) ApproveCSR(clusterName string) error { - var csrs *certificatesv1.CertificateSigningRequestList - var csrClient = t.HubKubeClient.CertificatesV1().CertificateSigningRequests() - var err error - - if csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{ - LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name = %v", clusterName)}); err != nil { - return err - } - if len(csrs.Items) == 0 { - return fmt.Errorf("there is no csr related cluster %v", clusterName) - } - - for i := range csrs.Items { - csr := &csrs.Items[i] - if csr, err = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}); err != nil { - return err - } - - if isCSRInTerminalState(&csr.Status) { - continue - } - - csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ - Type: certificatesv1.CertificateApproved, - Status: corev1.ConditionTrue, - Reason: "Approved by E2E", - Message: "Approved as part of e2e", - }) - _, err = csrClient.UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}) - if err != nil { - return err - } - } - return nil -} - -func isCSRInTerminalState(status *certificatesv1.CertificateSigningRequestStatus) bool { - for _, c := range status.Conditions { - if c.Type == certificatesv1.CertificateApproved { - return true - } - if c.Type == certificatesv1.CertificateDenied { - return true - } - } - return false -} - -func (t *Tester) AcceptsClient(clusterName string) error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), - clusterName, metav1.GetOptions{}) - if err != nil { - return err - } - - managedCluster.Spec.HubAcceptsClient = true - managedCluster.Spec.LeaseDurationSeconds = 5 - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), - managedCluster, metav1.UpdateOptions{}) - return err -} - -func (t *Tester) CheckManagedClusterStatus(clusterName string) error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), - clusterName, metav1.GetOptions{}) - if err != nil { - return err - } - - var okCount = 0 - for _, condition := range managedCluster.Status.Conditions { - if (condition.Type == clusterv1.ManagedClusterConditionHubAccepted || - condition.Type == clusterv1.ManagedClusterConditionJoined || - condition.Type == clusterv1.ManagedClusterConditionAvailable) && - condition.Status == v1beta1.ConditionTrue { - okCount++ - } - } - - if okCount == 3 { - return nil - } - - return fmt.Errorf("cluster %s condtions are not ready: %v", clusterName, managedCluster.Status.Conditions) -} - -func (t *Tester) CreateWorkOfConfigMap(name, clusterName, configMapName, configMapNamespace string) (*workapiv1.ManifestWork, error) { - manifest := workapiv1.Manifest{} - manifest.Object = util.NewConfigmap(configMapNamespace, configMapName, map[string]string{"a": "b"}, []string{}) - manifestWork := &workapiv1.ManifestWork{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: workapiv1.ManifestWorkSpec{ - Workload: 
workapiv1.ManifestsTemplate{ - Manifests: []workapiv1.Manifest{ - manifest, - }, - }, - }, - } - - return t.HubWorkClient.WorkV1().ManifestWorks(clusterName). - Create(context.TODO(), manifestWork, metav1.CreateOptions{}) -} - -func (t *Tester) checkKlusterletStatus(klusterletName, condType, reason string, status metav1.ConditionStatus) error { - klusterlet, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) - if err != nil { - return err - } - - cond := meta.FindStatusCondition(klusterlet.Status.Conditions, condType) - if cond == nil { - return fmt.Errorf("cannot find condition type %s", condType) - } - - if cond.Reason != reason { - return fmt.Errorf("condition reason is not matched, expect %s, got %s", reason, cond.Reason) - } - - if cond.Status != status { - return fmt.Errorf("condition status is not matched, expect %s, got %s", status, cond.Status) - } - - return nil -} - -func (t *Tester) cleanManifestWorks(clusterName, workName string) error { - err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - return nil - } - if err != nil { - return err - } - - gomega.Eventually(func() bool { - _, err := t.HubWorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), workName, metav1.GetOptions{}) - return errors.IsNotFound(err) - }).Should(gomega.BeTrue()) - - return nil -} - -func (t *Tester) cleanKlusterletResources(klusterletName, clusterName string) error { - if klusterletName == "" { - return fmt.Errorf("the klusterlet name should not be null") - } - - // clean the klusterlets - err := t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return nil - } - return err - } - - gomega.Eventually(func() bool { - _, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - klog.Infof("klusterlet %s deleted successfully", klusterletName) - return true - } - if err != nil { - klog.Infof("get klusterlet %s error: %v", klusterletName, err) - } - return false - }).Should(gomega.BeTrue()) - - // clean the managed clusters - err = t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) - if err != nil { - if errors.IsNotFound(err) { - return nil - } - return err - } - - gomega.Eventually(func() bool { - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) - if errors.IsNotFound(err) { - klog.Infof("managed cluster %s deleted successfully", clusterName) - return true - } - if err != nil { - klog.Infof("get managed cluster %s error: %v", klusterletName, err) - } - return false - }).Should(gomega.BeTrue()) - - return nil -} - -func (t *Tester) CheckHubReady() error { - cm, err := t.checkClusterManagerStatus() - if err != nil { - return err - } - // make sure open-cluster-management-hub namespace is created - if _, err := t.HubKubeClient.CoreV1().Namespaces(). 
- Get(context.TODO(), t.clusterManagerNamespace, metav1.GetOptions{}); err != nil { - return err - } - - // make sure hub deployments are created - hubRegistrationDeployment := fmt.Sprintf("%s-registration-controller", t.clusterManagerName) - hubRegistrationWebhookDeployment := fmt.Sprintf("%s-registration-webhook", t.clusterManagerName) - hubWorkWebhookDeployment := fmt.Sprintf("%s-work-webhook", t.clusterManagerName) - hubWorkControllerDeployment := fmt.Sprintf("%s-work-controller", t.clusterManagerName) - hubPlacementDeployment := fmt.Sprintf("%s-placement-controller", t.clusterManagerName) - addonManagerDeployment := fmt.Sprintf("%s-addon-manager-controller", t.clusterManagerName) - if _, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace). - Get(context.TODO(), hubRegistrationDeployment, metav1.GetOptions{}); err != nil { - return err - } - gomega.Eventually(func() error { - registrationWebhookDeployment, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace). - Get(context.TODO(), hubRegistrationWebhookDeployment, metav1.GetOptions{}) - if err != nil { - return err - } - replicas := *registrationWebhookDeployment.Spec.Replicas - readyReplicas := registrationWebhookDeployment.Status.ReadyReplicas - if readyReplicas != replicas { - return fmt.Errorf("deployment %s should have %d but got %d ready replicas", hubRegistrationWebhookDeployment, replicas, readyReplicas) - } - return nil - }).Should(gomega.BeNil()) - - gomega.Eventually(func() error { - workWebhookDeployment, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace). - Get(context.TODO(), hubWorkWebhookDeployment, metav1.GetOptions{}) - if err != nil { - return err - } - replicas := *workWebhookDeployment.Spec.Replicas - readyReplicas := workWebhookDeployment.Status.ReadyReplicas - if readyReplicas != replicas { - return fmt.Errorf("deployment %s should have %d but got %d ready replicas", hubWorkWebhookDeployment, replicas, readyReplicas) - } - return nil - }).Should(gomega.BeNil()) - - var hubWorkControllerEnabled, addonManagerControllerEnabled bool - if cm.Spec.WorkConfiguration != nil { - hubWorkControllerEnabled = helpers.FeatureGateEnabled(cm.Spec.WorkConfiguration.FeatureGates, - ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet) - } - - if cm.Spec.AddOnManagerConfiguration != nil { - addonManagerControllerEnabled = helpers.FeatureGateEnabled(cm.Spec.AddOnManagerConfiguration.FeatureGates, - ocmfeature.DefaultHubAddonManagerFeatureGates, ocmfeature.AddonManagement) - } - - if hubWorkControllerEnabled { - gomega.Eventually(func() error { - workHubControllerDeployment, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace). - Get(context.TODO(), hubWorkControllerDeployment, metav1.GetOptions{}) - if err != nil { - return err - } - replicas := *workHubControllerDeployment.Spec.Replicas - readyReplicas := workHubControllerDeployment.Status.ReadyReplicas - if readyReplicas != replicas { - return fmt.Errorf("deployment %s should have %d but got %d ready replicas", hubWorkControllerDeployment, replicas, readyReplicas) - } - return nil - }).Should(gomega.BeNil()) - } - - if _, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace). - Get(context.TODO(), hubPlacementDeployment, metav1.GetOptions{}); err != nil { - return err - } - - if addonManagerControllerEnabled { - gomega.Eventually(func() error { - addonManagerControllerDeployment, err := t.HubKubeClient.AppsV1().Deployments(t.clusterManagerNamespace). 
- Get(context.TODO(), addonManagerDeployment, metav1.GetOptions{}) - if err != nil { - return err - } - replicas := *addonManagerControllerDeployment.Spec.Replicas - readyReplicas := addonManagerControllerDeployment.Status.ReadyReplicas - if readyReplicas != replicas { - return fmt.Errorf("deployment %s should have %d but got %d ready replicas", addonManagerDeployment, replicas, readyReplicas) - } - return nil - }).Should(gomega.BeNil()) - } - return nil -} - -func (t *Tester) EnableWorkFeature(feature string) error { - cm, err := t.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), t.clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - - if cm.Spec.WorkConfiguration == nil { - cm.Spec.WorkConfiguration = &operatorapiv1.WorkConfiguration{} - } - - if len(cm.Spec.WorkConfiguration.FeatureGates) == 0 { - cm.Spec.WorkConfiguration.FeatureGates = make([]operatorapiv1.FeatureGate, 0) - } - - for idx, f := range cm.Spec.WorkConfiguration.FeatureGates { - if f.Feature == feature { - if f.Mode == operatorapiv1.FeatureGateModeTypeEnable { - return nil - } - cm.Spec.WorkConfiguration.FeatureGates[idx].Mode = operatorapiv1.FeatureGateModeTypeEnable - _, err = t.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), cm, metav1.UpdateOptions{}) - return err - } - } - - featureGate := operatorapiv1.FeatureGate{ - Feature: feature, - Mode: operatorapiv1.FeatureGateModeTypeEnable, - } - - cm.Spec.WorkConfiguration.FeatureGates = append(cm.Spec.WorkConfiguration.FeatureGates, featureGate) - _, err = t.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), cm, metav1.UpdateOptions{}) - return err -} - -func (t *Tester) RemoveWorkFeature(feature string) error { - clusterManager, err := t.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), t.clusterManagerName, metav1.GetOptions{}) - if err != nil { - return err - } - for indx, fg := range clusterManager.Spec.WorkConfiguration.FeatureGates { - if fg.Feature == feature { - clusterManager.Spec.WorkConfiguration.FeatureGates[indx].Mode = operatorapiv1.FeatureGateModeTypeDisable - break - } - } - _, err = t.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), clusterManager, metav1.UpdateOptions{}) - return err -} - -func (t *Tester) checkClusterManagerStatus() (*operatorapiv1.ClusterManager, error) { - cm, err := t.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), t.clusterManagerName, metav1.GetOptions{}) - if err != nil { - return nil, err - } - if meta.IsStatusConditionFalse(cm.Status.Conditions, "Applied") { - return nil, fmt.Errorf("components of cluster manager are not all applied") - } - if meta.IsStatusConditionFalse(cm.Status.Conditions, "ValidFeatureGates") { - return nil, fmt.Errorf("feature gates are not all valid") - } - if !meta.IsStatusConditionFalse(cm.Status.Conditions, "HubRegistrationDegraded") { - return nil, fmt.Errorf("HubRegistration is degraded") - } - if !meta.IsStatusConditionFalse(cm.Status.Conditions, "HubPlacementDegraded") { - return nil, fmt.Errorf("HubPlacement is degraded") - } - if !meta.IsStatusConditionFalse(cm.Status.Conditions, "Progressing") { - return nil, fmt.Errorf("ClusterManager is still progressing") - } - - return cm, nil -} - -func (t *Tester) CheckKlusterletOperatorReady() error { - // make sure klusterlet operator deployment is created - _, err := t.SpokeKubeClient.AppsV1().Deployments(t.operatorNamespace). 
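// A short sketch, under the same assumptions as the sketch above, of the feature-gate flow
// after the Tester methods move into the framework package: the hub-side helper toggles the
// gate on the ClusterManager, and hub readiness is re-checked because enabling a gate rolls
// out new deployments.
func exampleEnableManifestWorkReplicaSet(hub *framework.Hub) {
	gomega.Eventually(func() error {
		return hub.EnableHubWorkFeature("ManifestWorkReplicaSet")
	}).Should(gomega.Succeed())
	gomega.Eventually(func() error {
		return hub.CheckHubReady()
	}).Should(gomega.Succeed())
}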
- Get(context.TODO(), t.klusterletOperator, metav1.GetOptions{}) - return err -} - -// GetRandomClusterName gets the clusterName generated by registration randomly. -// the cluster name is the random name if it has not prefix "e2e-". -// TODO: get random cluster name from event -func (t *Tester) GetRandomClusterName() (string, error) { - managedClusterList, err := t.ClusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - return "", err - } - - for _, managedCluster := range managedClusterList.Items { - clusterName := managedCluster.Name - if !strings.HasPrefix(clusterName, "e2e-") { - return clusterName, nil - } - } - return "", fmt.Errorf("there is no managedCluster with the random name") -} - -// TODO: only output the details of created resources during e2e -func (t *Tester) OutputDebugLogs() { - klusterletes, err := t.OperatorClient.OperatorV1().Klusterlets().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - klog.Errorf("failed to list klusterlets. error: %v", err) - } - for _, klusterlet := range klusterletes.Items { - klog.Infof("klusterlet %v : %#v \n", klusterlet.Name, klusterlet) - } - - managedClusters, err := t.ClusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{}) - if err != nil { - klog.Errorf("failed to list managedClusters. error: %v", err) - } - for _, managedCluster := range managedClusters.Items { - klog.Infof("managedCluster %v : %#v \n", managedCluster.Name, managedCluster) - } - - registrationPods, err := t.SpokeKubeClient.CoreV1().Pods("").List(context.Background(), - metav1.ListOptions{LabelSelector: "app=klusterlet-registration-agent"}) - if err != nil { - klog.Errorf("failed to list registration pods. error: %v", err) - } - - manifestWorkPods, err := t.SpokeKubeClient.CoreV1().Pods("").List(context.Background(), - metav1.ListOptions{LabelSelector: "app=klusterlet-manifestwork-agent"}) - if err != nil { - klog.Errorf("failed to get manifestwork pods. error: %v", err) - } - - agentPods := append(registrationPods.Items, manifestWorkPods.Items...) - for _, pod := range agentPods { - klog.Infof("klusterlet agent pod %v/%v\n", pod.Namespace, pod.Name) - logs, err := t.SpokePodLog(pod.Name, pod.Namespace, int64(10)) - if err != nil { - klog.Errorf("failed to get pod %v/%v log. error: %v", pod.Namespace, pod.Name, err) - continue - } - klog.Infof("pod %v/%v logs:\n %v \n", pod.Namespace, pod.Name, logs) - } - - manifestWorks, err := t.HubWorkClient.WorkV1().ManifestWorks("").List(context.TODO(), metav1.ListOptions{}) - if err != nil { - klog.Errorf("failed to list manifestWorks. error: %v", err) - } - for _, manifestWork := range manifestWorks.Items { - klog.Infof("manifestWork %v/%v : %#v \n", manifestWork.Namespace, manifestWork.Name, manifestWork) - } -} - -func (t *Tester) SpokePodLog(podName, nameSpace string, lines int64) (string, error) { - podLogs, err := t.SpokeKubeClient.CoreV1().Pods(nameSpace). 
- GetLogs(podName, &corev1.PodLogOptions{TailLines: &lines}).Stream(context.TODO()) - if err != nil { - return "", err - } - defer podLogs.Close() - - buf := new(bytes.Buffer) - _, err = io.Copy(buf, podLogs) - if err != nil { - return "", err - } - - return buf.String(), nil -} - -func (t *Tester) CreateManagedClusterAddOn(managedClusterNamespace, addOnName, installNamespace string) error { - _, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(managedClusterNamespace).Create( - context.TODO(), - &addonv1alpha1.ManagedClusterAddOn{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: managedClusterNamespace, - Name: addOnName, - }, - Spec: addonv1alpha1.ManagedClusterAddOnSpec{ - InstallNamespace: installNamespace, - }, - }, - metav1.CreateOptions{}, - ) - return err -} - -func (t *Tester) CreateManagedClusterAddOnLease(addOnInstallNamespace, addOnName string) error { - if _, err := t.HubKubeClient.CoreV1().Namespaces().Create( - context.TODO(), - &corev1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: addOnInstallNamespace, - }, - }, - metav1.CreateOptions{}, - ); err != nil { - return err - } - - _, err := t.HubKubeClient.CoordinationV1().Leases(addOnInstallNamespace).Create( - context.TODO(), - &coordv1.Lease{ - ObjectMeta: metav1.ObjectMeta{ - Name: addOnName, - Namespace: addOnInstallNamespace, - }, - Spec: coordv1.LeaseSpec{ - RenewTime: &metav1.MicroTime{Time: time.Now()}, - }, - }, - metav1.CreateOptions{}, - ) - return err -} - -func (t *Tester) CheckManagedClusterAddOnStatus(managedClusterNamespace, addOnName string) error { - addOn, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(managedClusterNamespace).Get(context.TODO(), addOnName, metav1.GetOptions{}) - if err != nil { - return err - } - - if addOn.Status.Conditions == nil { - return fmt.Errorf("there is no conditions in addon %v/%v", managedClusterNamespace, addOnName) - } - - if !meta.IsStatusConditionTrue(addOn.Status.Conditions, "Available") { - return fmt.Errorf("the addon %v/%v available condition is not true, %v", - managedClusterNamespace, addOnName, addOn.Status.Conditions) - } - - return nil -} - -func (t *Tester) DeleteExternalKubeconfigSecret(klusterlet *operatorapiv1.Klusterlet) error { - agentNamespace := helpers.AgentNamespace(klusterlet) - err := t.HubKubeClient.CoreV1().Secrets(agentNamespace).Delete(context.TODO(), - helpers.ExternalManagedKubeConfig, metav1.DeleteOptions{}) - if err != nil { - klog.Errorf("failed to delete external managed secret in ns %v. %v", agentNamespace, err) - return err - } - - return nil -} - -func (t *Tester) CreateFakeExternalKubeconfigSecret(klusterlet *operatorapiv1.Klusterlet) error { - agentNamespace := helpers.AgentNamespace(klusterlet) - klog.Infof("klusterlet: %s/%s, \t, \t agent namespace: %s", - klusterlet.Name, klusterlet.Namespace, agentNamespace) - - bsSecret, err := t.HubKubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), - t.bootstrapHubSecret.Name, metav1.GetOptions{}) - if err != nil { - klog.Errorf("failed to get bootstrap secret %v in ns %v. %v", bsSecret, agentNamespace, err) - return err - } - - // create external-managed-kubeconfig, will use the same cluster to simulate the Hosted mode. - secret, err := changeHostOfKubeconfigSecret(*bsSecret, "https://kube-apiserver.i-am-a-fake-server:6443") - if err != nil { - klog.Errorf("failed to change host of the kubeconfig secret in. 
%v", err) - return err - } - secret.Namespace = agentNamespace - secret.Name = helpers.ExternalManagedKubeConfig - secret.ResourceVersion = "" - - _, err = t.HubKubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) - if err != nil { - klog.Errorf("failed to create external managed secret %v in ns %v. %v", bsSecret, agentNamespace, err) - return err - } - - return nil -} - -func (t *Tester) BuildClusterClient(saNamespace, saName string, clusterPolicyRules, policyRules []rbacv1.PolicyRule) (clusterclient.Interface, error) { - var err error - - sa := &corev1.ServiceAccount{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: saNamespace, - Name: saName, - }, - } - _, err = t.HubKubeClient.CoreV1().ServiceAccounts(saNamespace).Create(context.TODO(), sa, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - // create cluster role/rolebinding - if len(clusterPolicyRules) > 0 { - clusterRoleName := fmt.Sprintf("%s-clusterrole", saName) - clusterRole := &rbacv1.ClusterRole{ - ObjectMeta: metav1.ObjectMeta{ - Name: clusterRoleName, - }, - Rules: clusterPolicyRules, - } - _, err = t.HubKubeClient.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - clusterRoleBinding := &rbacv1.ClusterRoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-clusterrolebinding", saName), - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Namespace: saNamespace, - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "ClusterRole", - Name: clusterRoleName, - }, - } - _, err = t.HubKubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - } - - // create cluster role/rolebinding - if len(policyRules) > 0 { - roleName := fmt.Sprintf("%s-role", saName) - role := &rbacv1.Role{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: saNamespace, - Name: roleName, - }, - Rules: []rbacv1.PolicyRule{ - { - APIGroups: []string{"cluster.open-cluster-management.io"}, - Resources: []string{"managedclustersetbindings"}, - Verbs: []string{"create", "get", "update"}, - }, - }, - } - _, err = t.HubKubeClient.RbacV1().Roles(saNamespace).Create(context.TODO(), role, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - - roleBinding := &rbacv1.RoleBinding{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: saNamespace, - Name: fmt.Sprintf("%s-rolebinding", saName), - }, - Subjects: []rbacv1.Subject{ - { - Kind: "ServiceAccount", - Namespace: saNamespace, - Name: saName, - }, - }, - RoleRef: rbacv1.RoleRef{ - APIGroup: "rbac.authorization.k8s.io", - Kind: "Role", - Name: roleName, - }, - } - _, err = t.HubKubeClient.RbacV1().RoleBindings(saNamespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}) - if err != nil { - return nil, err - } - } - - tokenRequest, err := t.HubKubeClient.CoreV1().ServiceAccounts(saNamespace).CreateToken( - context.TODO(), - saName, - &authv1.TokenRequest{ - Spec: authv1.TokenRequestSpec{ - ExpirationSeconds: pointer.Int64(8640 * 3600), - }, - }, - metav1.CreateOptions{}, - ) - if err != nil { - return nil, err - } - - unauthorizedClusterClient, err := clusterclient.NewForConfig(&rest.Config{ - Host: t.HubClusterCfg.Host, - TLSClientConfig: rest.TLSClientConfig{ - CAData: t.HubClusterCfg.CAData, - }, - BearerToken: tokenRequest.Status.Token, - }) - return unauthorizedClusterClient, err -} - -// 
cleanupClusterClient delete cluster-scope resource created by func "buildClusterClient", -// the namespace-scope resources should be deleted by an additional namespace deleting func. -// It is recommended be invoked as a pair with the func "buildClusterClient" -func (t *Tester) CleanupClusterClient(saNamespace, saName string) error { - err := t.HubKubeClient.CoreV1().ServiceAccounts(saNamespace).Delete(context.TODO(), saName, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("delete sa %q/%q failed: %v", saNamespace, saName, err) - } - - // delete cluster role and cluster role binding if exists - clusterRoleName := fmt.Sprintf("%s-clusterrole", saName) - err = t.HubKubeClient.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRoleName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("delete cluster role %q failed: %v", clusterRoleName, err) - } - clusterRoleBindingName := fmt.Sprintf("%s-clusterrolebinding", saName) - err = t.HubKubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return fmt.Errorf("delete cluster role binding %q failed: %v", clusterRoleBindingName, err) - } - - return nil -} - -func (t *Tester) DeleteManageClusterAndRelatedNamespace(clusterName string) error { - if err := wait.Poll(1*time.Second, 90*time.Second, func() (bool, error) { - err := t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) - if err != nil && !errors.IsNotFound(err) { - return false, err - } - return true, nil - }); err != nil { - return fmt.Errorf("delete managed cluster %q failed: %v", clusterName, err) - } - - // delete namespace created by hub automatically - if err := wait.Poll(1*time.Second, 5*time.Second, func() (bool, error) { - err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) - // some managed cluster just created, but the csr is not approved, - // so there is not a related namespace - if err != nil && !errors.IsNotFound(err) { - return false, err - } - - return true, nil - }); err != nil { - return fmt.Errorf("delete related namespace %q failed: %v", clusterName, err) - } - - return nil -} - -func changeHostOfKubeconfigSecret(secret corev1.Secret, apiServerURL string) (*corev1.Secret, error) { - kubeconfigData, ok := secret.Data["kubeconfig"] - if !ok { - return nil, fmt.Errorf("kubeconfig not found") - } - - if kubeconfigData == nil { - return nil, fmt.Errorf("failed to get kubeconfig from secret: %s", secret.GetName()) - } - - kubeconfig, err := clientcmd.Load(kubeconfigData) - if err != nil { - return nil, fmt.Errorf("failed to load kubeconfig from secret: %s", secret.GetName()) - } - - if len(kubeconfig.Clusters) == 0 { - return nil, fmt.Errorf("there is no cluster in kubeconfig from secret: %s", secret.GetName()) - } - - for k := range kubeconfig.Clusters { - kubeconfig.Clusters[k].Server = apiServerURL - } - - newKubeconfig, err := clientcmd.Write(*kubeconfig) - if err != nil { - return nil, fmt.Errorf("failed to write new kubeconfig to secret: %s", secret.GetName()) - } - - secret.Data = map[string][]byte{ - "kubeconfig": newKubeconfig, - } - - klog.Infof("Set the cluster server URL in %s secret with apiServerURL %s", secret.Name, apiServerURL) - return &secret, nil -} - -func createResourcesFromYamlFiles( - ctx context.Context, - dynamicClient dynamic.Interface, - restMapper meta.RESTMapper, - scheme 
*runtime.Scheme, - manifests func(name string) ([]byte, error), - resourceFiles []string) error { - - var appliedErrs []error - - decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer() - for _, fileName := range resourceFiles { - objData, err := manifests(fileName) - if err != nil { - return err - } - required := unstructured.Unstructured{} - _, gvk, err := decoder.Decode(objData, nil, &required) - if err != nil { - return err - } - - mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return err - } - - _, err = dynamicClient.Resource(mapping.Resource).Namespace(required.GetNamespace()).Create( - ctx, &required, metav1.CreateOptions{}) - if errors.IsAlreadyExists(err) { - continue - } - if err != nil { - fmt.Printf("Error creating %q (%T): %v\n", fileName, mapping.Resource, err) - appliedErrs = append(appliedErrs, fmt.Errorf("%q (%T): %v", fileName, mapping.Resource, err)) - } - } - - return utilerrors.NewAggregate(appliedErrs) -} - -func deleteResourcesFromYamlFiles( - ctx context.Context, - dynamicClient dynamic.Interface, - restMapper meta.RESTMapper, - scheme *runtime.Scheme, - manifests func(name string) ([]byte, error), - resourceFiles []string) error { - - var appliedErrs []error - - decoder := serializer.NewCodecFactory(scheme).UniversalDeserializer() - for _, fileName := range resourceFiles { - objData, err := manifests(fileName) - if err != nil { - return err - } - required := unstructured.Unstructured{} - _, gvk, err := decoder.Decode(objData, nil, &required) - if err != nil { - return err - } - - mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version) - if err != nil { - return err - } - - err = dynamicClient.Resource(mapping.Resource).Namespace(required.GetNamespace()).Delete( - ctx, required.GetName(), metav1.DeleteOptions{}) - if errors.IsNotFound(err) { - continue - } - if err != nil { - fmt.Printf("Error deleting %q (%T): %v\n", fileName, mapping.Resource, err) - appliedErrs = append(appliedErrs, fmt.Errorf("%q (%T): %v", fileName, mapping.Resource, err)) - } - } - - return utilerrors.NewAggregate(appliedErrs) -} - -// defaultAddonTemplateReaderManifestsFunc returns a function that reads the addon template from the embed.FS, -// and replaces the placeholder in format of "<< placeholder >>" with the value in configValues. -func defaultAddonTemplateReaderManifestsFunc( - fs embed.FS, - configValues map[string]interface{}, -) func(string) ([]byte, error) { - - return func(fileName string) ([]byte, error) { - template, err := fs.ReadFile(fileName) - if err != nil { - return nil, err - } - - t := fasttemplate.New(string(template), "<< ", " >>") - objData := t.ExecuteString(configValues) - return []byte(objData), nil - } -} diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 23eaf8ccd..f28f54619 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -4,18 +4,21 @@ import ( "context" "flag" "fmt" + "os" "testing" "time" . "github.com/onsi/ginkgo/v2" . 
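// Tiny sketch of the "<< placeholder >>" substitution performed by
// defaultAddonTemplateReaderManifestsFunc above, assuming github.com/valyala/fasttemplate is
// imported; the manifest text and values here are illustrative only.
func exampleRenderTemplate() string {
	manifest := "namespace: << AddonInstallNamespace >>\nimage: << Image >>"
	t := fasttemplate.New(manifest, "<< ", " >>")
	return t.ExecuteString(map[string]interface{}{
		"AddonInstallNamespace": "open-cluster-management-agent-addon",
		"Image":                 "quay.io/open-cluster-management/addon-examples:latest",
	})
}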
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" clusterv1beta2 "open-cluster-management.io/api/cluster/v1beta2" operatorapiv1 "open-cluster-management.io/api/operator/v1" -) -var t *Tester + "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/test/framework" +) var ( // kubeconfigs @@ -30,6 +33,16 @@ var ( registrationImage string workImage string singletonImage string + images framework.Images + + // bootstrap-hub-kubeconfig + // It's a secret named 'bootstrap-hub-kubeconfig' under the namespace 'open-cluster-management-agent', + // the content of the secret is a kubeconfig file. + // + // The secret is used when: + // 1. klusterlet is created not in the namespace 'open-cluster-management-agent', but a customized namespace. + // 2. in hosted,bootstrap-hub-kubeconfig secret under the ns "open-cluster-management-agent". + bootstrapHubKubeConfigSecret *corev1.Secret ) func init() { @@ -44,6 +57,9 @@ func init() { flag.StringVar(&singletonImage, "singleton-image", "", "The image of the klusterlet agent") } +var hub *framework.Hub +var spoke *framework.Spoke + // The e2e will always create one universal klusterlet, the developers can reuse this klusterlet in their case // but also pay attention, because the klusterlet is shared, so the developers should not delete the klusterlet. // And there might be some side effects on other cases if the developers change the klusterlet's spec for their cases. @@ -55,10 +71,7 @@ const ( ) func TestE2E(tt *testing.T) { - t = NewTester(hubKubeconfig, managedKubeconfig, registrationImage, workImage, singletonImage) - OutputFail := func(message string, callerSkip ...int) { - t.OutputDebugLogs() Fail(message, callerSkip...) } @@ -72,38 +85,76 @@ func TestE2E(tt *testing.T) { var _ = BeforeSuite(func() { var err error + // Setup kubeconfigs + if hubKubeconfig == "" { + hubKubeconfig = os.Getenv("KUBECONFIG") + } + if managedKubeconfig == "" { + managedKubeconfig = os.Getenv("KUBECONFIG") + } + + // Setup images + images = framework.Images{ + RegistrationImage: registrationImage, + WorkImage: workImage, + SingletonImage: singletonImage, + } + // In most OCM cases, we expect user should see the result in 90 seconds. // For cases that need more than 90 seconds, please set the timeout in the test case EXPLICITLY. SetDefaultEventuallyTimeout(90 * time.Second) SetDefaultEventuallyPollingInterval(5 * time.Second) - Expect(t.Init()).ToNot(HaveOccurred()) + By("Setup hub") + hub, err = framework.NewHub(hubKubeconfig) + Expect(err).ToNot(HaveOccurred()) - Eventually(t.CheckHubReady).Should(Succeed()) + By("Setup spokeTestHelper") + spoke, err = framework.NewSpoke(managedKubeconfig) + Expect(err).ToNot(HaveOccurred()) - Eventually(t.CheckKlusterletOperatorReady).Should(Succeed()) + By("Setup default bootstrap-hub-kubeconfig") + bootstrapHubKubeConfigSecret, err = spoke.KubeClient.CoreV1().Secrets(helpers.KlusterletDefaultNamespace). 
+ Get(context.TODO(), helpers.BootstrapHubKubeConfig, metav1.GetOptions{}) + Expect(err).ToNot(HaveOccurred()) + // The secret will used via copy and create another secret in other ns, so we need to clean the resourceVersion and namespace + bootstrapHubKubeConfigSecret.ObjectMeta.ResourceVersion = "" + bootstrapHubKubeConfigSecret.ObjectMeta.Namespace = "" - err = t.SetBootstrapHubSecret("") + By("Check Hub Ready") + Eventually(func() error { + return hub.CheckHubReady() + }).Should(Succeed()) + + By("Check Klusterlet Operator Ready") + Eventually(func() error { + return framework.CheckDeploymentReady(context.TODO(), spoke.KubeClient, spoke.KlusterletOperatorNamespace, spoke.KlusterletOperator) + }).Should(Succeed()) + By("Enable Work Feature") if nilExecutorValidating { Eventually(func() error { - return t.EnableWorkFeature("NilExecutorValidating") + return hub.EnableHubWorkFeature("NilExecutorValidating") }).Should(Succeed()) } - Expect(err).ToNot(HaveOccurred()) + By("Enable ManifestWorkReplicaSet Feature") Eventually(func() error { - return t.EnableWorkFeature("ManifestWorkReplicaSet") + return hub.EnableHubWorkFeature("ManifestWorkReplicaSet") + }).Should(Succeed()) + Eventually(func() error { + return hub.CheckHubReady() }).Should(Succeed()) - Eventually(t.CheckHubReady).Should(Succeed()) By("Create a universal Klusterlet/managedcluster") - _, err = t.CreateApprovedKlusterlet( - universalKlusterletName, universalClusterName, universalAgentNamespace, operatorapiv1.InstallMode(klusterletDeployMode)) - Expect(err).ToNot(HaveOccurred()) + framework.CreateAndApproveKlusterlet( + hub, spoke, + universalKlusterletName, universalClusterName, universalAgentNamespace, operatorapiv1.InstallMode(klusterletDeployMode), + bootstrapHubKubeConfigSecret, images, + ) By("Create a universal ClusterSet and bind it with the universal managedcluster") - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), &clusterv1beta2.ManagedClusterSet{ + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), &clusterv1beta2.ManagedClusterSet{ ObjectMeta: metav1.ObjectMeta{ Name: universalClusterSetName, }, @@ -116,7 +167,7 @@ var _ = BeforeSuite(func() { Expect(err).ToNot(HaveOccurred()) Eventually(func() error { - umc, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) + umc, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{}) if err != nil { return err } @@ -126,12 +177,12 @@ var _ = BeforeSuite(func() { } labels[clusterv1beta2.ClusterSetLabel] = universalClusterSetName umc.SetLabels(labels) - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), umc, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), umc, metav1.UpdateOptions{}) return err }).Should(Succeed()) }) var _ = AfterSuite(func() { By(fmt.Sprintf("clean klusterlet %v resources after the test case", universalKlusterletName)) - Expect(t.cleanKlusterletResources(universalKlusterletName, universalClusterName)).To(BeNil()) + framework.CleanKlusterletRelatedResources(hub, spoke, universalKlusterletName, universalClusterName) }) diff --git a/test/e2e/klusterlet_hosted_test.go b/test/e2e/klusterlet_hosted_test.go index 3d102ebac..cb29faf99 100644 --- a/test/e2e/klusterlet_hosted_test.go +++ b/test/e2e/klusterlet_hosted_test.go @@ -14,6 +14,7 @@ import ( operatorapiv1 
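// A minimal sketch of standing up the new framework entry points shown in BeforeSuite above,
// assuming both kubeconfig paths point at reachable clusters and that context and the
// framework package are imported; error handling is shortened for brevity.
func exampleNewFramework(hubKubeconfig, spokeKubeconfig string) (*framework.Hub, *framework.Spoke, error) {
	hub, err := framework.NewHub(hubKubeconfig)
	if err != nil {
		return nil, nil, err
	}
	spoke, err := framework.NewSpoke(spokeKubeconfig)
	if err != nil {
		return nil, nil, err
	}
	// Make sure the ClusterManager components and the klusterlet operator deployment are ready
	// before any test case runs.
	if err := hub.CheckHubReady(); err != nil {
		return nil, nil, err
	}
	if err := framework.CheckDeploymentReady(context.TODO(), spoke.KubeClient,
		spoke.KlusterletOperatorNamespace, spoke.KlusterletOperator); err != nil {
		return nil, nil, err
	}
	return hub, spoke, nil
}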
"open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/test/framework" ) var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func() { @@ -32,23 +33,23 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func It("Delete klusterlet CR in Hosted mode without external managed kubeconfig", func() { By(fmt.Sprintf("create klusterlet %v with managed cluster name %v in Hosted mode", klusterletName, clusterName)) - _, err := t.CreatePureHostedKlusterlet(klusterletName, clusterName) + _, err := spoke.CreatePureHostedKlusterlet(klusterletName, clusterName) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "ReadyToApply", "KlusterletPrepareFailed", metav1.ConditionFalse) + err := spoke.CheckKlusterletStatus(klusterletName, "ReadyToApply", "KlusterletPrepareFailed", metav1.ConditionFalse) return err }).Should(Succeed()) By(fmt.Sprintf("delete the klusterlet %s", klusterletName)) - err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), + err = spoke.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("check klusterlet %s was deleted", klusterletName)) Eventually(func() error { - _, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), + _, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) if errors.IsNotFound(err) { return nil @@ -58,7 +59,7 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func By(fmt.Sprintf("check the agent namespace %s on the management cluster was deleted", klusterletName)) Eventually(func() error { - _, err := t.HubKubeClient.CoreV1().Namespaces().Get(context.TODO(), + _, err := hub.KubeClient.CoreV1().Namespaces().Get(context.TODO(), klusterletName, metav1.GetOptions{}) if errors.IsNotFound(err) { return nil @@ -69,40 +70,41 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func It("Delete klusterlet CR in Hosted mode when the managed cluster was destroyed", func() { By(fmt.Sprintf("create klusterlet %v with managed cluster name %v", klusterletName, clusterName)) - klusterlet, err := t.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, operatorapiv1.InstallModeHosted) + klusterlet, err := spoke.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, + operatorapiv1.InstallModeHosted, bootstrapHubKubeConfigSecret, images) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("waiting for the managed cluster %v to be created", clusterName)) Eventually(func() error { - _, err := t.GetCreatedManagedCluster(clusterName) + _, err := hub.GetManagedCluster(clusterName) return err }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) return err }).Should(Succeed()) By(fmt.Sprintf("approve the created managed cluster %v", clusterName)) Eventually(func() error { - return t.ApproveCSR(clusterName) + return hub.ApproveManagedClusterCSR(clusterName) }).Should(Succeed()) By(fmt.Sprintf("accept 
the created managed cluster %v", clusterName)) Eventually(func() error { - return t.AcceptsClient(clusterName) + return hub.AcceptManageCluster(clusterName) }).Should(Succeed()) By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName)) Eventually(func() error { - return t.CheckManagedClusterStatus(clusterName) + return hub.CheckManagedClusterStatus(clusterName) }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse) return err }).Should(Succeed()) @@ -110,26 +112,26 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func // change the kubeconfig host of external managed kubeconfig secret to a wrong value // to simulate the managed cluster was destroyed By("Delete external managed kubeconfig", func() { - err = t.DeleteExternalKubeconfigSecret(klusterlet) + err = spoke.DeleteExternalKubeconfigSecret(klusterlet) Expect(err).ToNot(HaveOccurred()) }) By("Delete managed cluster", func() { // clean the managed clusters - err = t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), + err = hub.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) }) By("Delete klusterlet", func() { // clean the klusterlets - err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), + err = spoke.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) Expect(err).ToNot(HaveOccurred()) }) By("Create a fake external managed kubeconfig", func() { - err = t.CreateFakeExternalKubeconfigSecret(klusterlet) + err = spoke.CreateFakeExternalKubeconfigSecret(klusterlet) Expect(err).ToNot(HaveOccurred()) }) @@ -139,7 +141,7 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func By("Wait for the eviction timestamp annotation", func() { Eventually(func() error { - k, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), + k, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) if err != nil { return err @@ -155,7 +157,7 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func time.Sleep(3 * time.Second) // after the eviction timestamp exists, wait 3 seconds for cache syncing By("Update the eviction timestamp annotation", func() { Eventually(func() error { - k, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), + k, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) if err != nil { return err @@ -164,14 +166,14 @@ var _ = Describe("Delete hosted klusterlet CR", Label("klusterlet-hosted"), func ta := time.Now().Add(-6 * time.Minute).Format(time.RFC3339) By(fmt.Sprintf("add time %v anno for klusterlet %s", ta, klusterletName)) k.Annotations[evictionTimestampAnno] = ta - _, err = t.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), + _, err = spoke.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), k, metav1.UpdateOptions{}) return err }).Should(Succeed()) }) By("Check manged cluster and klusterlet can be deleted", func() { - Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(BeNil()) + 
framework.CleanKlusterletRelatedResources(hub, spoke, klusterletName, clusterName) }) }) }) diff --git a/test/e2e/klusterlet_test.go b/test/e2e/klusterlet_test.go index 1c79e1187..65de71167 100644 --- a/test/e2e/klusterlet_test.go +++ b/test/e2e/klusterlet_test.go @@ -3,6 +3,7 @@ package e2e import ( "context" "fmt" + "strings" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" @@ -13,6 +14,7 @@ import ( operatorapiv1 "open-cluster-management.io/api/operator/v1" "open-cluster-management.io/ocm/pkg/operator/helpers" + "open-cluster-management.io/ocm/test/framework" ) var _ = Describe("Create klusterlet CR", Label("klusterlet"), func() { @@ -28,85 +30,91 @@ var _ = Describe("Create klusterlet CR", Label("klusterlet"), func() { AfterEach(func() { By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName)) - Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(BeNil()) + framework.CleanKlusterletRelatedResources(hub, spoke, klusterletName, clusterName) }) // This test case is helpful for the Backward compatibility It("Create klusterlet CR with install mode empty", func() { By(fmt.Sprintf("create klusterlet %v with managed cluster name %v", klusterletName, clusterName)) // Set install mode empty - _, err := t.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, "") + _, err := spoke.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, + "", bootstrapHubKubeConfigSecret, images) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("waiting for the managed cluster %v to be created", clusterName)) Eventually(func() error { - _, err := t.GetCreatedManagedCluster(clusterName) + _, err := hub.GetManagedCluster(clusterName) return err }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", + "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) return err }).Should(Succeed()) By(fmt.Sprintf("approve the created managed cluster %v", clusterName)) Eventually(func() error { - return t.ApproveCSR(clusterName) + return hub.ApproveManagedClusterCSR(clusterName) }).Should(Succeed()) By(fmt.Sprintf("accept the created managed cluster %v", clusterName)) Eventually(func() error { - return t.AcceptsClient(clusterName) + return hub.AcceptManageCluster(clusterName) }).Should(Succeed()) By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName)) Eventually(func() error { - return t.CheckManagedClusterStatus(clusterName) + return hub.CheckManagedClusterStatus(clusterName) }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse) + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", + "HubConnectionFunctional", metav1.ConditionFalse) return err }).Should(Succeed()) }) It("Create klusterlet CR with managed cluster name", func() { By(fmt.Sprintf("create klusterlet %v with managed cluster name %v", klusterletName, clusterName)) - _, err := t.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, operatorapiv1.InstallMode(klusterletDeployMode)) + _, err := spoke.CreateKlusterlet(klusterletName, clusterName, 
klusterletNamespace, + operatorapiv1.InstallMode(klusterletDeployMode), bootstrapHubKubeConfigSecret, images) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("waiting for the managed cluster %v to be created", clusterName)) Eventually(func() error { - _, err := t.GetCreatedManagedCluster(clusterName) + _, err := hub.GetManagedCluster(clusterName) return err }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", + "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) return err }).Should(Succeed()) By(fmt.Sprintf("approve the created managed cluster %v", clusterName)) Eventually(func() error { - return t.ApproveCSR(clusterName) + return hub.ApproveManagedClusterCSR(clusterName) }).Should(Succeed()) By(fmt.Sprintf("accept the created managed cluster %v", clusterName)) Eventually(func() error { - return t.AcceptsClient(clusterName) + return hub.AcceptManageCluster(clusterName) }).Should(Succeed()) By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName)) Eventually(func() error { - return t.CheckManagedClusterStatus(clusterName) + return hub.CheckManagedClusterStatus(clusterName) }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse) + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", + "HubConnectionFunctional", metav1.ConditionFalse) return err }).Should(Succeed()) }) @@ -116,85 +124,100 @@ var _ = Describe("Create klusterlet CR", Label("klusterlet"), func() { klusterletNamespace = "" var err error By(fmt.Sprintf("create klusterlet %v without managed cluster name", klusterletName)) - _, err = t.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, operatorapiv1.InstallMode(klusterletDeployMode)) + _, err = spoke.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, + operatorapiv1.InstallMode(klusterletDeployMode), bootstrapHubKubeConfigSecret, images) Expect(err).ToNot(HaveOccurred()) By("waiting for the managed cluster to be created") Eventually(func() error { - clusterName, err = t.GetRandomClusterName() - return err + // GetRandomClusterName gets the clusterName generated by registration randomly. + // the cluster name is the random name if it has not prefix "e2e-". 
+ // TODO: get random cluster name from event + managedClusterList, err := hub.ClusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + for _, managedCluster := range managedClusterList.Items { + if !strings.HasPrefix(managedCluster.Name, "e2e-") { + clusterName = managedCluster.Name + return nil + } + } + return fmt.Errorf("there is no managedCluster with the random name") }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) - return err + return spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", + "BootstrapSecretFunctional,HubKubeConfigSecretMissing", metav1.ConditionTrue) }).Should(Succeed()) By(fmt.Sprintf("approve the created managed cluster %v", clusterName)) Eventually(func() error { - return t.ApproveCSR(clusterName) + return hub.ApproveManagedClusterCSR(clusterName) }).Should(Succeed()) By(fmt.Sprintf("accept the created managed cluster %v", clusterName)) Eventually(func() error { - return t.AcceptsClient(clusterName) + return hub.AcceptManageCluster(clusterName) }).Should(Succeed()) By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName)) Eventually(func() error { - return t.CheckManagedClusterStatus(clusterName) + return hub.CheckManagedClusterStatus(clusterName) }).Should(Succeed()) By(fmt.Sprintf("check klusterlet %s status", klusterletName)) Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse) + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", + "HubConnectionFunctional", metav1.ConditionFalse) return err }).Should(Succeed()) }) It("Update klusterlet CR namespace", func() { By(fmt.Sprintf("create klusterlet %v with managed cluster name %v", klusterletName, clusterName)) - _, err := t.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, operatorapiv1.InstallMode(klusterletDeployMode)) + _, err := spoke.CreateKlusterlet(klusterletName, clusterName, klusterletNamespace, + operatorapiv1.InstallMode(klusterletDeployMode), bootstrapHubKubeConfigSecret, images) Expect(err).ToNot(HaveOccurred()) By(fmt.Sprintf("waiting for the managed cluster %v to be created", clusterName)) Eventually(func() error { - _, err := t.GetCreatedManagedCluster(clusterName) + _, err := hub.GetManagedCluster(clusterName) return err }).Should(Succeed()) By(fmt.Sprintf("approve the created managed cluster %v", clusterName)) Eventually(func() error { - return t.ApproveCSR(clusterName) + return hub.ApproveManagedClusterCSR(clusterName) }).Should(Succeed()) By(fmt.Sprintf("accept the created managed cluster %v", clusterName)) Eventually(func() error { - return t.AcceptsClient(clusterName) + return hub.AcceptManageCluster(clusterName) }).Should(Succeed()) By(fmt.Sprintf("waiting for the managed cluster %v to be ready", clusterName)) Eventually(func() error { - return t.CheckManagedClusterStatus(clusterName) + return hub.CheckManagedClusterStatus(clusterName) }).Should(Succeed()) By("update klusterlet namespace") newNamespace := "open-cluster-management-agent-another" Eventually(func() error { - klusterlet, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) + klusterlet, err := 
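// Sketch of the inline lookup above factored into a helper, assuming the hub's cluster
// clientset plus context, strings and fmt are available; it returns the first ManagedCluster
// whose name was generated by registration, i.e. one without the "e2e-" prefix this suite uses.
func exampleRandomClusterName(ctx context.Context, clusterClient clusterclient.Interface) (string, error) {
	managedClusterList, err := clusterClient.ClusterV1().ManagedClusters().List(ctx, metav1.ListOptions{})
	if err != nil {
		return "", err
	}
	for _, managedCluster := range managedClusterList.Items {
		if !strings.HasPrefix(managedCluster.Name, "e2e-") {
			return managedCluster.Name, nil
		}
	}
	return "", fmt.Errorf("there is no managedCluster with a generated name")
}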
spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) if err != nil { return err } klusterlet.Spec.Namespace = newNamespace - _, err = t.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), klusterlet, metav1.UpdateOptions{}) + _, err = spoke.OperatorClient.OperatorV1().Klusterlets().Update(context.TODO(), klusterlet, metav1.UpdateOptions{}) return err }).Should(Succeed()) By("copy bootstrap secret to the new namespace") Eventually(func() error { - secret := t.bootstrapHubSecret.DeepCopy() - _, err = t.SpokeKubeClient.CoreV1().Secrets(newNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + secret := bootstrapHubKubeConfigSecret.DeepCopy() + _, err = spoke.KubeClient.CoreV1().Secrets(newNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) if errors.IsAlreadyExists(err) { return nil } @@ -203,7 +226,7 @@ var _ = Describe("Create klusterlet CR", Label("klusterlet"), func() { By("old namespace should be removed") Eventually(func() error { - _, err := t.SpokeKubeClient.CoreV1().Namespaces().Get(context.TODO(), klusterletNamespace, metav1.GetOptions{}) + _, err := spoke.KubeClient.CoreV1().Namespaces().Get(context.TODO(), klusterletNamespace, metav1.GetOptions{}) if errors.IsNotFound(err) { return nil } @@ -212,18 +235,18 @@ var _ = Describe("Create klusterlet CR", Label("klusterlet"), func() { By("addon namespace should be kept") Eventually(func() error { - _, err := t.SpokeKubeClient.CoreV1().Namespaces().Get(context.TODO(), helpers.DefaultAddonNamespace, metav1.GetOptions{}) + _, err := spoke.KubeClient.CoreV1().Namespaces().Get(context.TODO(), helpers.DefaultAddonNamespace, metav1.GetOptions{}) return err }).Should(Succeed()) By(fmt.Sprintf("approve the managed cluster %v since it is registered in the new namespace", clusterName)) Eventually(func() error { - return t.ApproveCSR(clusterName) + return hub.ApproveManagedClusterCSR(clusterName) }).Should(Succeed()) By("klusterlet status should be ok") Eventually(func() error { - err := t.checkKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse) + err := spoke.CheckKlusterletStatus(klusterletName, "HubConnectionDegraded", "HubConnectionFunctional", metav1.ConditionFalse) return err }).Should(Succeed()) }) diff --git a/test/e2e/managedcluster_loopback_test.go b/test/e2e/managedcluster_loopback_test.go index 842fe6615..103d1b7e9 100644 --- a/test/e2e/managedcluster_loopback_test.go +++ b/test/e2e/managedcluster_loopback_test.go @@ -42,14 +42,14 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { } // delete the claim if exists // TODO use spoke cluster client - err := t.ClusterClient.ClusterV1alpha1().ClusterClaims().Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) + err := hub.ClusterClient.ClusterV1alpha1().ClusterClaims().Delete(context.TODO(), claim.Name, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // create the claim err = wait.Poll(1*time.Second, 5*time.Second, func() (bool, error) { var err error - _, err = t.ClusterClient.ClusterV1alpha1().ClusterClaims().Create(context.TODO(), claim, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1alpha1().ClusterClaims().Create(context.TODO(), claim, metav1.CreateOptions{}) if err != nil { return false, err } @@ -68,7 +68,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { ginkgo.By(fmt.Sprintf("Deploying the agent using suffix=%q 
ns=%q", suffix, nsName)) var ( managedCluster *clusterv1.ManagedCluster - managedClusters = t.ClusterClient.ClusterV1().ManagedClusters() + managedClusters = hub.ClusterClient.ClusterV1().ManagedClusters() ) ginkgo.By(fmt.Sprintf("Waiting for ManagedCluster %q to exist", universalClusterName)) @@ -128,7 +128,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { ginkgo.By(fmt.Sprintf("Make sure ManagedCluster lease %q exists", leaseName)) var lastRenewTime *metav1.MicroTime err = wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - lease, err := t.HubKubeClient.CoordinationV1().Leases(universalClusterName).Get(context.TODO(), leaseName, metav1.GetOptions{}) + lease, err := hub.KubeClient.CoordinationV1().Leases(universalClusterName).Get(context.TODO(), leaseName, metav1.GetOptions{}) if err != nil { return false, err } @@ -139,7 +139,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { ginkgo.By(fmt.Sprintf("Make sure ManagedCluster lease %q is updated", leaseName)) err = wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - lease, err := t.HubKubeClient.CoordinationV1().Leases(universalClusterName).Get(context.TODO(), leaseName, metav1.GetOptions{}) + lease, err := hub.KubeClient.CoordinationV1().Leases(universalClusterName).Get(context.TODO(), leaseName, metav1.GetOptions{}) if err != nil { return false, err } @@ -153,7 +153,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { ginkgo.By(fmt.Sprintf("Make sure ManagedCluster lease %q is updated again", leaseName)) err = wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) { - lease, err := t.HubKubeClient.CoordinationV1().Leases(universalClusterName).Get(context.TODO(), leaseName, metav1.GetOptions{}) + lease, err := hub.KubeClient.CoordinationV1().Leases(universalClusterName).Get(context.TODO(), leaseName, metav1.GetOptions{}) if err != nil { return false, err } @@ -225,7 +225,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { Name: addOnName, }, } - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), addOnNs, metav1.CreateOptions{}) + _, err = spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), addOnNs, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // create an addon @@ -238,10 +238,10 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { InstallNamespace: addOnName, }, } - _, err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Create(context.TODO(), addOn, metav1.CreateOptions{}) + _, err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Create(context.TODO(), addOn, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - created, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) + created, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) created.Status = addonv1alpha1.ManagedClusterAddOnStatus{ Registrations: []addonv1alpha1.RegistrationConfig{ @@ -250,12 +250,12 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { }, }, } - _, err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).UpdateStatus(context.TODO(), created, metav1.UpdateOptions{}) + _, err = 
hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).UpdateStatus(context.TODO(), created, metav1.UpdateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) var ( csrs *certificatesv1.CertificateSigningRequestList - csrClient = t.HubKubeClient.CertificatesV1().CertificateSigningRequests() + csrClient = hub.KubeClient.CertificatesV1().CertificateSigningRequests() ) ginkgo.By(fmt.Sprintf("Waiting for the CSR for addOn %q to exist", addOnName)) @@ -303,7 +303,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { ginkgo.By("Check addon client certificate in secret") secretName := fmt.Sprintf("%s-hub-kubeconfig", addOnName) gomega.Eventually(func() bool { - secret, err := t.SpokeKubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) + secret, err := spoke.KubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) if err != nil { return false } @@ -321,7 +321,7 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { ginkgo.By("Check addon status") gomega.Eventually(func() error { - found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(managedCluster.Name).Get(context.TODO(), addOn.Name, metav1.GetOptions{}) + found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(managedCluster.Name).Get(context.TODO(), addOn.Name, metav1.GetOptions{}) if err != nil { return err } @@ -334,16 +334,16 @@ var _ = ginkgo.Describe("Loopback registration [development]", func() { }).Should(gomega.Succeed()) ginkgo.By("Delete the addon and check if secret is gone") - err = t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + err = hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) gomega.Eventually(func() bool { - _, err = t.SpokeKubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().Secrets(addOnName).Get(context.TODO(), secretName, metav1.GetOptions{}) return errors.IsNotFound(err) }).Should(gomega.BeTrue()) ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName)) - err = t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) + err = spoke.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) }) diff --git a/test/e2e/managedclustersetbinding_test.go b/test/e2e/managedclustersetbinding_test.go index a4f4f014f..5154d298e 100644 --- a/test/e2e/managedclustersetbinding_test.go +++ b/test/e2e/managedclustersetbinding_test.go @@ -27,23 +27,24 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() { Name: namespace, }, } - _, err := t.HubKubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + _, err := hub.KubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // make sure the managedclustersetbinding can be created successfully gomega.Eventually(func() error { clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - _, err := 
t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). + Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) if err != nil { return err } - return t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{}) + return hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{}) }).Should(gomega.Succeed()) }) ginkgo.AfterEach(func() { - err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}) + err := hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}) if errors.IsNotFound(err) { return } @@ -53,12 +54,13 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() { ginkgo.It("should bound a ManagedClusterSetBinding", func() { clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). + Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // make sure the managedclustersetbinding status is correct gomega.Eventually(func() error { - binding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{}) + binding, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{}) if err != nil { return err } @@ -75,12 +77,12 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() { }, } - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // make sure the managedclustersetbinding status is correct gomega.Eventually(func() error { - binding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{}) + binding, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{}) if err != nil { return err } @@ -91,12 +93,12 @@ var _ = ginkgo.Describe("ManagedClusterSetBinding", func() { return nil }).Should(gomega.Succeed()) - err = t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // make sure the managedclustersetbinding status is correct gomega.Eventually(func() error { - binding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{}) + binding, err := 
hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Get(context.TODO(), clusterSetName, metav1.GetOptions{}) if err != nil { return err } diff --git a/test/e2e/manifestworkreplicaset_test.go b/test/e2e/manifestworkreplicaset_test.go index 0208dbb89..65b35bb12 100644 --- a/test/e2e/manifestworkreplicaset_test.go +++ b/test/e2e/manifestworkreplicaset_test.go @@ -62,7 +62,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor PlacementRefs: []workapiv1alpha1.LocalPlacementReference{placementRef}, }, } - manifestWorkReplicaSet, err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Create( + manifestWorkReplicaSet, err = hub.WorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Create( context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) @@ -75,7 +75,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor ClusterSet: universalClusterSetName, }, } - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(metav1.NamespaceDefault).Create( + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(metav1.NamespaceDefault).Create( context.Background(), csb, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -89,28 +89,28 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor }, } - placement, err = t.ClusterClient.ClusterV1beta1().Placements(placement.Namespace).Create(context.TODO(), placement, metav1.CreateOptions{}) + placement, err = hub.ClusterClient.ClusterV1beta1().Placements(placement.Namespace).Create(context.TODO(), placement, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) ginkgo.By("check if resources are applied for manifests") gomega.Eventually(func() error { - _, err := t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err := spoke.KubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) if err != nil { return err } - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("check if manifestworkreplicaset status") gomega.Eventually(func() error { - mwrs, err := t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Get( + mwrs, err := hub.WorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Get( context.TODO(), manifestWorkReplicaSet.Name, metav1.GetOptions{}) if err != nil { return err @@ -135,11 +135,11 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor // TODO we should also update manifestwork replicaset and test - err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Delete( + err = hub.WorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Delete( context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = 
t.ClusterClient.ClusterV1beta1().Placements(placement.Namespace).Delete(context.TODO(), placement.Name, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1beta1().Placements(placement.Namespace).Delete(context.TODO(), placement.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) @@ -166,31 +166,31 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor Name: namespace, }, } - _, err := t.HubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + _, err := hub.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.JustAfterEach(func() { // delete namespace - err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) + err := hub.KubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // delete clusters created - clusterList, err := t.ClusterClient.ClusterV1().ManagedClusters().List(context.Background(), metav1.ListOptions{ + clusterList, err := hub.ClusterClient.ClusterV1().ManagedClusters().List(context.Background(), metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", clusterapiv1beta2.ClusterSetLabel, clusterSetName), }) gomega.Expect(err).ToNot(gomega.HaveOccurred()) for _, cluster := range clusterList.Items { - err = t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), cluster.Name, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), cluster.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // delete created clusterset - err = t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Delete(context.Background(), clusterSetName, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Delete(context.Background(), clusterSetName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // delete placement - err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Delete(context.TODO(), placementName, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Delete(context.TODO(), placementName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -201,7 +201,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor Name: clusterSetName, }, } - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.Background(), clusterset, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.Background(), clusterset, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) csb := &clusterapiv1beta2.ManagedClusterSetBinding{ @@ -213,7 +213,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor ClusterSet: clusterSetName, }, } - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.Background(), csb, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.Background(), csb, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) numOfClusters := 3 @@ -228,7 +228,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor }, }, } - _, err = 
t.ClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), cluster, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), cluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ns := &corev1.Namespace{ @@ -236,7 +236,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor Name: clsName, }, } - _, err = t.HubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + _, err = hub.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } @@ -256,7 +256,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor }, } - _, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Create(context.TODO(), placement, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Create(context.TODO(), placement, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By(fmt.Sprintf("Create manifestWorkReplicaSet %s", mwReplicaSetName)) @@ -289,12 +289,12 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor PlacementRefs: []workapiv1alpha1.LocalPlacementReference{placementRef}, }, } - _, err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Create(context.TODO(), mwReplicaSet, metav1.CreateOptions{}) + _, err = hub.WorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Create(context.TODO(), mwReplicaSet, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("Check manifestWork replicaSet status is updated") gomega.Eventually(func() error { - mwrSet, err := t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Get(context.TODO(), mwReplicaSetName, metav1.GetOptions{}) + mwrSet, err := hub.WorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Get(context.TODO(), mwReplicaSetName, metav1.GetOptions{}) if err != nil { return err } @@ -311,7 +311,7 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor ginkgo.By("Check manifestWorks are created") gomega.Eventually(func() error { - manifestWorkList, err := t.HubWorkClient.WorkV1().ManifestWorks("").List(context.TODO(), metav1.ListOptions{ + manifestWorkList, err := hub.WorkClient.WorkV1().ManifestWorks("").List(context.TODO(), metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s.%s", mwrSetLabel, namespace, mwReplicaSetName), }) if err != nil { @@ -325,12 +325,12 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", ginkgo.Label("manifestwor }).Should(gomega.Succeed()) ginkgo.By("Delete manifestWorkReplicaSet") - err = t.HubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Delete(context.TODO(), mwReplicaSetName, metav1.DeleteOptions{}) + err = hub.WorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespace).Delete(context.TODO(), mwReplicaSetName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("Check manifestworks are deleted") gomega.Eventually(func() error { - manifestWorkList, err := t.HubWorkClient.WorkV1().ManifestWorks("").List(context.TODO(), metav1.ListOptions{ + manifestWorkList, err := hub.WorkClient.WorkV1().ManifestWorks("").List(context.TODO(), metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s.%s", mwrSetLabel, namespace, mwReplicaSetName), }) if err != nil { diff --git a/test/e2e/placement_test.go b/test/e2e/placement_test.go index 
59584ed14..d3a1c0365 100644 --- a/test/e2e/placement_test.go +++ b/test/e2e/placement_test.go @@ -51,14 +51,14 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), }, }, } - _, err := t.HubKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + _, err := hub.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { var errs []error ginkgo.By("Delete managedclustersets") - err := t.ClusterClient.ClusterV1beta2().ManagedClusterSets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{ + err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: e2eTestLabel + "=" + e2eTestLabelValue, }) if err != nil { @@ -66,7 +66,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), } ginkgo.By("Delete managedclusters") - err = t.ClusterClient.ClusterV1().ManagedClusters().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{ + err = hub.ClusterClient.ClusterV1().ManagedClusters().DeleteCollection(context.Background(), metav1.DeleteOptions{}, metav1.ListOptions{ LabelSelector: e2eTestLabel + "=" + e2eTestLabelValue, }) if err != nil { @@ -74,7 +74,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), } ginkgo.By("Delete namespace") - err = t.HubKubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) + err = hub.KubeClient.CoreV1().Namespaces().Delete(context.Background(), namespace, metav1.DeleteOptions{}) if err != nil { errs = append(errs, err) } @@ -85,7 +85,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), assertPlacementDecisionCreated := func(placement *clusterapiv1beta1.Placement) { ginkgo.By("Check if placementdecision is created") gomega.Eventually(func() bool { - pdl, err := t.ClusterClient.ClusterV1beta1().PlacementDecisions(namespace).List(context.Background(), metav1.ListOptions{ + pdl, err := hub.ClusterClient.ClusterV1beta1().PlacementDecisions(namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: clusterapiv1beta1.PlacementLabel + "=" + placement.Name, }) if err != nil { @@ -108,7 +108,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), ginkgo.By("Check the number of decisions in placementdecisions") desiredNOPD := desiredNOD/maxNumOfClusterDecisions + 1 gomega.Eventually(func() bool { - pdl, err := t.ClusterClient.ClusterV1beta1().PlacementDecisions(namespace).List(context.Background(), metav1.ListOptions{ + pdl, err := hub.ClusterClient.ClusterV1beta1().PlacementDecisions(namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: clusterapiv1beta1.PlacementLabel + "=" + placementName, }) if err != nil { @@ -128,7 +128,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), assertPlacementStatus := func(placementName string, numOfSelectedClusters int, satisfied bool) { ginkgo.By("Check the status of placement") gomega.Eventually(func() bool { - placement, err := t.ClusterClient.ClusterV1beta1().Placements(namespace).Get(context.Background(), placementName, metav1.GetOptions{}) + placement, err := hub.ClusterClient.ClusterV1beta1().Placements(namespace).Get(context.Background(), placementName, metav1.GetOptions{}) if err != nil { 
return false } @@ -176,7 +176,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), }, } } - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.Background(), clusterset, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.Background(), clusterset, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } @@ -194,7 +194,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), ClusterSet: clusterSetName, }, } - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.Background(), csb, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.Background(), csb, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } @@ -219,7 +219,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), Labels: labels, }, } - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), cluster, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), cluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) } } @@ -244,7 +244,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), }, } - placement, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Create(context.Background(), placement, metav1.CreateOptions{}) + placement, err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Create(context.Background(), placement, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) assertPlacementDecisionCreated(placement) @@ -261,13 +261,13 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), ginkgo.By("Reduce NOC of the placement") gomega.Eventually(func() error { - placement, err := t.ClusterClient.ClusterV1beta1().Placements(namespace).Get(context.Background(), placementName, metav1.GetOptions{}) + placement, err := hub.ClusterClient.ClusterV1beta1().Placements(namespace).Get(context.Background(), placementName, metav1.GetOptions{}) if err != nil { return err } noc := int32(6) placement.Spec.NumberOfClusters = &noc - _, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Update(context.Background(), placement, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Update(context.Background(), placement, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) @@ -283,12 +283,12 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), assertPlacementStatus(placementName, 6, true) ginkgo.By("Delete placement") - err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Delete(context.TODO(), placementName, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Delete(context.TODO(), placementName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("Check if placementdecisions are deleted as well") gomega.Eventually(func() bool { - placementDecisions, err := t.ClusterClient.ClusterV1beta1().PlacementDecisions(namespace).List(context.TODO(), metav1.ListOptions{ + placementDecisions, err := hub.ClusterClient.ClusterV1beta1().PlacementDecisions(namespace).List(context.TODO(), metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s", clusterapiv1beta1.PlacementLabel, placementName), 
}) if err != nil { @@ -306,7 +306,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), ginkgo.By("Add cluster predicate") gomega.Eventually(func() error { - placement, err := t.ClusterClient.ClusterV1beta1().Placements(namespace).Get(context.Background(), placementName, metav1.GetOptions{}) + placement, err := hub.ClusterClient.ClusterV1beta1().Placements(namespace).Get(context.Background(), placementName, metav1.GetOptions{}) if err != nil { return err } @@ -321,7 +321,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), }, }, } - _, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Update(context.Background(), placement, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Update(context.Background(), placement, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) @@ -330,7 +330,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), assertPlacementStatus(placementName, 0, false) ginkgo.By("Delete placement") - err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Delete(context.TODO(), placementName, metav1.DeleteOptions{}) + err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Delete(context.TODO(), placementName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -351,7 +351,7 @@ var _ = ginkgo.Describe("Placement", ginkgo.Label("placement", "sanity-check"), }, } - _, err = t.ClusterClient.ClusterV1beta1().Placements(namespace).Create(context.Background(), placement, metav1.CreateOptions{}) + _, err = hub.ClusterClient.ClusterV1beta1().Placements(namespace).Create(context.Background(), placement, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) }) diff --git a/test/e2e/registration_taint_update_test.go b/test/e2e/registration_taint_update_test.go index b1c67826f..0ed51c4ef 100644 --- a/test/e2e/registration_taint_update_test.go +++ b/test/e2e/registration_taint_update_test.go @@ -33,17 +33,17 @@ var _ = ginkgo.Describe("Taints update check", ginkgo.Label("registration-taint" HubAcceptsClient: true, }, } - managedCluster, err = t.ClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) + managedCluster, err = hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.Background(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { - err := t.ClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) + err := hub.ClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), clusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should update taints automatically", func() { - managedClusters := t.ClusterClient.ClusterV1().ManagedClusters() + managedClusters := hub.ClusterClient.ClusterV1().ManagedClusters() ginkgo.By("Should only be one UnreachableTaint") gomega.Eventually(func() error { diff --git a/test/e2e/registration_webhook_test.go b/test/e2e/registration_webhook_test.go index 0ad1a1af7..ebdd35b13 100644 --- a/test/e2e/registration_webhook_test.go +++ b/test/e2e/registration_webhook_test.go @@ -37,27 +37,29 @@ var _ = ginkgo.Describe("Admission webhook", func() { }) ginkgo.AfterEach(func() { - gomega.Expect(t.DeleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred()) + 
gomega.Expect(hub.DeleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred()) }) ginkgo.Context("Creating a managed cluster", func() { ginkgo.It("Should have the default LeaseDurationSeconds", func() { ginkgo.By(fmt.Sprintf("create a managed cluster %q", clusterName)) - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), newManagedCluster(clusterName, false, validURL), metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), + newManagedCluster(clusterName, false, validURL), metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), + clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Spec.LeaseDurationSeconds).To(gomega.Equal(int32(60))) }) ginkgo.It("Should have the default Clusterset Label (no labels in cluster)", func() { ginkgo.By(fmt.Sprintf("create a managed cluster %q", clusterName)) oriManagedCluster := newManagedCluster(clusterName, false, validURL) - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), oriManagedCluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), oriManagedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Labels[clusterv1beta2.ClusterSetLabel]).To(gomega.Equal(string(defaultClusterSetName))) @@ -71,10 +73,10 @@ var _ = ginkgo.Describe("Admission webhook", func() { oriManagedCluster.Labels = map[string]string{ "test": "test_value", } - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), oriManagedCluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), oriManagedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Labels[clusterv1beta2.ClusterSetLabel]).To(gomega.Equal(string(defaultClusterSetName))) @@ -88,10 +90,10 @@ var _ = ginkgo.Describe("Admission webhook", func() { oriManagedCluster.Labels = map[string]string{ clusterv1beta2.ClusterSetLabel: "", } - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), oriManagedCluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), oriManagedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) 
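// Editor's note: a minimal sketch of how an e2e spec reads through the new shared
// handles after this refactor. The handle variables (hub, spoke) and the client
// field names (ClusterClient, KubeClient) are taken from the call sites in this
// patch; the import paths and the spec scaffolding around them are assumptions,
// not part of the change. The snippet relies on the package-level hub, spoke and
// universalClusterName variables that the e2e suite sets up.

package e2e

import (
	"context"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var _ = ginkgo.Describe("Example: framework handles", func() {
	ginkgo.It("reads from hub and spoke through the shared handles", func() {
		// Hub-side resources are reached through the hub handle's client fields.
		_, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(
			context.TODO(), universalClusterName, metav1.GetOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())

		// Spoke-side resources are reached through the spoke handle's client fields.
		_, err = spoke.KubeClient.CoreV1().Namespaces().Get(
			context.TODO(), "default", metav1.GetOptions{})
		gomega.Expect(err).ToNot(gomega.HaveOccurred())
	})
})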
gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Labels[clusterv1beta2.ClusterSetLabel]).To(gomega.Equal(string(defaultClusterSetName))) @@ -107,11 +109,11 @@ var _ = ginkgo.Describe("Admission webhook", func() { Effect: clusterv1.TaintEffectNoSelect, }, } - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), cluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), cluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("check if timeAdded of the taint is set automatically") - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) taint := findTaint(managedCluster.Spec.Taints, "a", "b", clusterv1.TaintEffectNoSelect) gomega.Expect(taint).ShouldNot(gomega.BeNil()) @@ -121,7 +123,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // sleep and make sure the update is performed 1 second later than the creation time.Sleep(1 * time.Second) gomega.Eventually(func() error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) if err != nil { return err } @@ -138,12 +140,12 @@ var _ = ginkgo.Describe("Admission webhook", func() { Effect: clusterv1.TaintEffectNoSelectIfNew, } } - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) return err }).Should(gomega.Succeed()) ginkgo.By("check if timeAdded of the taint is reset") - managedCluster, err = t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err = hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) updatedTaint := findTaint(managedCluster.Spec.Taints, "a", "b", clusterv1.TaintEffectNoSelectIfNew) gomega.Expect(updatedTaint).ShouldNot(gomega.BeNil()) @@ -156,7 +158,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { managedCluster := newManagedCluster(clusterName, false, invalidURL) - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) gomega.Expect(err.Error()).Should(gomega.Equal(fmt.Sprintf( @@ -171,7 +173,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { clusterName = fmt.Sprintf("webhook.spoke-%s", rand.String(6)) managedCluster := newManagedCluster(clusterName, false, validURL) - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) 
gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) @@ -183,7 +185,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedCluster // but cannot change the ManagedCluster HubAcceptsClient field - unauthorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -197,8 +199,8 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = unauthorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - gomega.Expect(t.DeleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.DeleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should forbid the request when creating a managed cluster with a termaniting namespace", func() { @@ -206,7 +208,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) - authorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -227,7 +229,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) // create a namespace, add a finilizer to it, and delete it - _, err = t.HubKubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ + _, err = hub.KubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, Finalizers: []string{ @@ -238,7 +240,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) // delete the namespace - err = t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) + err = hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // create a managed cluster, should be denied @@ -247,20 +249,20 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) // remove the finalizer to truly delete the namespace - ns, err := t.HubKubeClient.CoreV1().Namespaces().Get(context.TODO(), clusterName, metav1.GetOptions{}) + ns, err := hub.KubeClient.CoreV1().Namespaces().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ns.Finalizers = []string{} - _, err = t.HubKubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) + _, err = hub.KubeClient.CoreV1().Namespaces().Update(context.TODO(), ns, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, 
sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should accept the request when creating an accepted managed cluster by authorized user", func() { sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) ginkgo.By(fmt.Sprintf("create an managed cluster %q with authorized service account %q", clusterName, sa)) - authorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -284,7 +286,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = authorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should accept the request when update managed cluster other field by unauthorized user", func() { @@ -294,7 +296,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedCluster // but cannot change the ManagedCluster HubAcceptsClient field - unauthorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -305,11 +307,11 @@ var _ = ginkgo.Describe("Admission webhook", func() { managedCluster := newManagedCluster(clusterName, true, validURL) - managedCluster, err = t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) + managedCluster, err = hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) err = retry.RetryOnConflict(retry.DefaultRetry, func() error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedCluster.Name, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedCluster.Name, metav1.GetOptions{}) if err != nil { return err } @@ -320,7 +322,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { return err }) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should accept the request when creating a managed cluster with clusterset specified by authorized user", func() { @@ -333,14 +335,14 @@ var _ = ginkgo.Describe("Admission webhook", func() { }, } - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) ginkgo.By(fmt.Sprintf("create a managed cluster %q with unauthorized service account %q", clusterName, sa)) - authorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := 
hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -361,7 +363,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = authorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should forbid the request when creating a managed cluster with clusterset specified by unauthorized user", func() { @@ -374,7 +376,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { }, } - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) @@ -383,7 +385,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedCluster // but cannot set the clusterset label - unauthorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -399,7 +401,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = unauthorizedClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) }) @@ -409,28 +411,28 @@ var _ = ginkgo.Describe("Admission webhook", func() { ginkgo.By(fmt.Sprintf("Creating managed cluster %q", clusterName)) clusterName = fmt.Sprintf("webhook-spoke-%s", rand.String(6)) managedCluster := newManagedCluster(clusterName, false, validURL) - _, err := t.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Create(context.TODO(), managedCluster, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { ginkgo.By(fmt.Sprintf("Cleaning managed cluster %q", clusterName)) - gomega.Expect(t.DeleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.DeleteManageClusterAndRelatedNamespace(clusterName)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should not update the LeaseDurationSeconds to zero", func() { ginkgo.By(fmt.Sprintf("try to update managed cluster %q LeaseDurationSeconds to zero", clusterName)) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) 
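// Editor's note: the webhook specs above and below all follow the same
// "restricted ServiceAccount" pattern: build a client carrying only a narrow set
// of RBAC rules, attempt the guarded operation, assert that admission returns
// Forbidden, then tear the account down. A condensed, illustrative version is
// below. Only calls that appear in this patch are used (hub.BuildClusterClient,
// hub.CleanupClusterClient, newManagedClusterSetBinding); the argument shape of
// BuildClusterClient mirrors the four-argument call sites shown later in this
// file and may differ from the framework package's actual signature, and the
// snippet assumes the spec-level namespace variable plus the file-level imports
// these specs already use (fmt, context, rbacv1, errors, rand, metav1, gomega).

ginkgo.It("example: forbid a bind from an unauthorized ServiceAccount", func() {
	sa := fmt.Sprintf("webhook-sa-%s", rand.String(6))
	clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6))

	// This client may create ManagedClusterSetBindings but holds no
	// managedclustersets/bind permission, so the webhook should reject the create.
	unauthorizedClient, err := hub.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{
		{
			APIGroups: []string{"cluster.open-cluster-management.io"},
			Resources: []string{"managedclustersetbindings"},
			Verbs:     []string{"create"},
		},
	})
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	binding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName)
	_, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).
		Create(context.TODO(), binding, metav1.CreateOptions{})
	gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue())

	// Remove the ServiceAccount and its RBAC once the assertion is done.
	gomega.Expect(hub.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred())
})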
managedCluster.Spec.LeaseDurationSeconds = 0 - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) return err }) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Spec.LeaseDurationSeconds).To(gomega.Equal(int32(60))) }) @@ -438,16 +440,16 @@ var _ = ginkgo.Describe("Admission webhook", func() { ginkgo.It("Should not delete the default ClusterSet Label", func() { ginkgo.By(fmt.Sprintf("try to update managed cluster %q ClusterSet label", clusterName)) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) delete(managedCluster.Labels, clusterv1beta2.ClusterSetLabel) - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) return err }) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Labels[clusterv1beta2.ClusterSetLabel]).To(gomega.Equal(string(defaultClusterSetName))) }) @@ -455,17 +457,17 @@ var _ = ginkgo.Describe("Admission webhook", func() { ginkgo.It("Should not update the other ClusterSet Label", func() { ginkgo.By(fmt.Sprintf("try to update managed cluster %q ClusterSet label", clusterName)) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) managedCluster.Labels[clusterv1beta2.ClusterSetLabel] = "s1" - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) return err }) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(managedCluster.Labels[clusterv1beta2.ClusterSetLabel]).To(gomega.Equal("s1")) }) @@ -474,11 +476,11 @@ var _ = ginkgo.Describe("Admission webhook", func() { 
ginkgo.By(fmt.Sprintf("update managed cluster %q with an invalid external server URL %q", clusterName, invalidURL)) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - managedCluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) managedCluster.Spec.ManagedClusterClientConfigs[0].URL = invalidURL - _, err = t.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) return err }) gomega.Expect(err).To(gomega.HaveOccurred()) @@ -491,7 +493,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedCluster // but cannot change the ManagedCluster HubAcceptsClient field - unauthorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -511,13 +513,13 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should forbid the request when updating a managed cluster with a terminating namespace", func() { sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) var err error - authorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -538,7 +540,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) // create a namespace, add a finilizer to it, and delete it - _, err = t.HubKubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ + _, err = hub.KubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ Name: clusterName, Finalizers: []string{ @@ -549,7 +551,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) // delete the namespace - err = t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) + err = hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // update the HubAcceptsClient field to true @@ -563,7 +565,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { }) gomega.Expect(errors.IsForbidden(err)).To(gomega.BeTrue()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should accept the request when updating the clusterset of a managed cluster by authorized user", func() { @@ -576,13 +578,13 @@ var _ = ginkgo.Describe("Admission 
webhook", func() { }, } - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) ginkgo.By(fmt.Sprintf("accept managed cluster %q by an unauthorized user %q", clusterName, sa)) - authorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -608,7 +610,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { }) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("Should forbid the request when updating the clusterset of a managed cluster by unauthorized user", func() { @@ -621,7 +623,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { }, } - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSets().Create(context.TODO(), managedClusterSet, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) @@ -629,7 +631,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedCluster // but cannot change the clusterset label - unauthorizedClient, err := t.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(saNamespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclusters"}, @@ -657,7 +659,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - gomega.Expect(t.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(saNamespace, sa)).ToNot(gomega.HaveOccurred()) }) }) }) @@ -675,23 +677,24 @@ var _ = ginkgo.Describe("Admission webhook", func() { Name: namespace, }, } - _, err := t.HubKubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + _, err := hub.KubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // make sure the managedclusterset can be created successfully gomega.Eventually(func() error { clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). 
+ Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) if err != nil { return err } - return t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{}) + return hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Delete(context.TODO(), clusterSetName, metav1.DeleteOptions{}) }).Should(gomega.Succeed()) }) ginkgo.AfterEach(func() { - err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}) + err := hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), namespace, metav1.DeleteOptions{}) if errors.IsNotFound(err) { return } @@ -703,7 +706,8 @@ var _ = ginkgo.Describe("Admission webhook", func() { clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) clusterSetBindingName := fmt.Sprintf("clustersetbinding-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetBindingName, clusterSetName) - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). + Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) @@ -712,7 +716,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) - authorizedClient, err := t.BuildClusterClient(namespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := hub.BuildClusterClient(namespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclustersets/bind"}, @@ -731,7 +735,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = authorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("should forbid the request when creating a ManagedClusterSetBinding by unauthorized user", func() { @@ -740,7 +744,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedClusterSetBinding // but cannot bind ManagedClusterSet - unauthorizedClient, err := t.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclustersetbindings"}, @@ -750,11 +754,12 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).ToNot(gomega.HaveOccurred()) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - _, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err = unauthorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). 
+ Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - gomega.Expect(t.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) }) }) @@ -763,14 +768,14 @@ var _ = ginkgo.Describe("Admission webhook", func() { // create a cluster set binding clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - managedClusterSetBinding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create( + managedClusterSetBinding, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create( context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // update the cluster set binding clusterSetName = fmt.Sprintf("clusterset-%s", rand.String(6)) patch := fmt.Sprintf("{\"spec\": {\"clusterSet\": %q}}", clusterSetName) - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch( + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch( context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) @@ -780,12 +785,13 @@ var _ = ginkgo.Describe("Admission webhook", func() { // create a cluster set binding clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). + Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // create a client without clusterset binding permission sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) - unauthorizedClient, err := t.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclustersetbindings"}, @@ -812,7 +818,8 @@ var _ = ginkgo.Describe("Admission webhook", func() { clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) clusterSetBindingName := fmt.Sprintf("clustersetbinding-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetBindingName, clusterSetName) - _, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). 
+ Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) @@ -821,7 +828,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) - authorizedClient, err := t.BuildClusterClient(namespace, sa, []rbacv1.PolicyRule{ + authorizedClient, err := hub.BuildClusterClient(namespace, sa, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclustersets/bind"}, @@ -840,7 +847,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { _, err = authorizedClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - gomega.Expect(t.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) }) ginkgo.It("should forbid the request when creating a ManagedClusterSetBinding by unauthorized user", func() { @@ -849,7 +856,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { // prepare an unauthorized cluster client from a service account who can create/get/update ManagedClusterSetBinding // but cannot bind ManagedClusterSet - unauthorizedClient, err := t.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclustersetbindings"}, @@ -863,7 +870,7 @@ var _ = ginkgo.Describe("Admission webhook", func() { gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsForbidden(err)).Should(gomega.BeTrue()) - gomega.Expect(t.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) + gomega.Expect(hub.CleanupClusterClient(namespace, sa)).ToNot(gomega.HaveOccurred()) }) }) @@ -872,14 +879,14 @@ var _ = ginkgo.Describe("Admission webhook", func() { // create a cluster set binding clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - managedClusterSetBinding, err := t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create( + managedClusterSetBinding, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create( context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // update the cluster set binding clusterSetName = fmt.Sprintf("clusterset-%s", rand.String(6)) patch := fmt.Sprintf("{\"spec\": {\"clusterSet\": %q}}", clusterSetName) - _, err = t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch( + _, err = hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Patch( context.TODO(), managedClusterSetBinding.Name, types.MergePatchType, []byte(patch), metav1.PatchOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) @@ -889,12 +896,13 @@ var _ = ginkgo.Describe("Admission webhook", func() { // create a cluster set binding clusterSetName := fmt.Sprintf("clusterset-%s", rand.String(6)) managedClusterSetBinding := newManagedClusterSetBinding(namespace, clusterSetName, clusterSetName) - _, err := 
t.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace).Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) + _, err := hub.ClusterClient.ClusterV1beta2().ManagedClusterSetBindings(namespace). + Create(context.TODO(), managedClusterSetBinding, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) // create a client without clusterset binding permission sa := fmt.Sprintf("webhook-sa-%s", rand.String(6)) - unauthorizedClient, err := t.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ + unauthorizedClient, err := hub.BuildClusterClient(namespace, sa, nil, []rbacv1.PolicyRule{ { APIGroups: []string{"cluster.open-cluster-management.io"}, Resources: []string{"managedclustersetbindings"}, diff --git a/test/e2e/work_webhook_test.go b/test/e2e/work_webhook_test.go index 333d31918..342964da7 100644 --- a/test/e2e/work_webhook_test.go +++ b/test/e2e/work_webhook_test.go @@ -11,6 +11,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/tools/clientcmd" "k8s.io/client-go/util/retry" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" @@ -32,20 +33,20 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati ginkgo.AfterEach(func() { ginkgo.By(fmt.Sprintf("delete manifestwork %v/%v", universalClusterName, workName)) - gomega.Expect(t.cleanManifestWorks(universalClusterName, workName)).To(gomega.BeNil()) + gomega.Expect(hub.CleanManifestWorks(universalClusterName, workName)).To(gomega.BeNil()) }) ginkgo.Context("Creating a manifestwork", func() { ginkgo.It("Should respond bad request when creating a manifestwork with no manifests", func() { work := newManifestWork(universalClusterName, workName) - _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) ginkgo.It("Should respond bad request when creating a manifest with no name", func() { work := newManifestWork(universalClusterName, workName, []runtime.Object{util.NewConfigmap("default", "", nil, nil)}...) 
- _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).To(gomega.HaveOccurred()) gomega.Expect(errors.IsBadRequest(err)).Should(gomega.BeTrue()) }) @@ -63,7 +64,7 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati } // create a temporary role - _, err := t.HubKubeClient.RbacV1().Roles(universalClusterName).Create( + _, err := hub.KubeClient.RbacV1().Roles(universalClusterName).Create( context.TODO(), &rbacv1.Role{ ObjectMeta: metav1.ObjectMeta{ Namespace: universalClusterName, @@ -80,7 +81,7 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati gomega.Expect(err).ToNot(gomega.HaveOccurred()) // create a temporary rolebinding - _, err = t.HubKubeClient.RbacV1().RoleBindings(universalClusterName).Create( + _, err = hub.KubeClient.RbacV1().RoleBindings(universalClusterName).Create( context.TODO(), &rbacv1.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ Namespace: universalClusterName, @@ -104,13 +105,13 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati ginkgo.AfterEach(func() { // delete the temporary role - err := t.HubKubeClient.RbacV1().Roles(universalClusterName).Delete(context.TODO(), roleName, metav1.DeleteOptions{}) + err := hub.KubeClient.RbacV1().Roles(universalClusterName).Delete(context.TODO(), roleName, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { gomega.Expect(err).ToNot(gomega.HaveOccurred()) } // delete the temporary rolebinding - err = t.HubKubeClient.RbacV1().RoleBindings(universalClusterName).Delete(context.TODO(), roleName, metav1.DeleteOptions{}) + err = hub.KubeClient.RbacV1().RoleBindings(universalClusterName).Delete(context.TODO(), roleName, metav1.DeleteOptions{}) if !errors.IsNotFound(err) { gomega.Expect(err).ToNot(gomega.HaveOccurred()) } @@ -120,7 +121,9 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati work := newManifestWork(universalClusterName, workName, []runtime.Object{util.NewConfigmap("default", "cm1", nil, nil)}...) // impersonate as a hub user without execute-as permission - impersonatedConfig := *t.HubClusterCfg + hubClusterCfg, err := clientcmd.BuildConfigFromFlags("", hubKubeconfig) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + impersonatedConfig := *hubClusterCfg impersonatedConfig.Impersonate.UserName = fmt.Sprintf("system:serviceaccount:%s:%s", universalClusterName, hubUser) impersonatedHubWorkClient, err := workclientset.NewForConfig(&impersonatedConfig) gomega.Expect(err).ToNot(gomega.HaveOccurred()) @@ -140,7 +143,7 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati ginkgo.BeforeEach(func() { work := newManifestWork(universalClusterName, workName, []runtime.Object{util.NewConfigmap("default", "cm1", nil, nil)}...) 
- _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) @@ -148,11 +151,11 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati manifest := workapiv1.Manifest{} manifest.Object = util.NewConfigmap("default", "", nil, nil) err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - work, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + work, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, manifest) - _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err }) @@ -190,11 +193,11 @@ var _ = ginkgo.Describe("ManifestWork admission webhook", ginkgo.Label("validati } err := retry.RetryOnConflict(retry.DefaultRetry, func() error { - work, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + work, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) work.Spec.Workload.Manifests = append(work.Spec.Workload.Manifests, manifests...) 
- _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err }) diff --git a/test/e2e/work_workload_test.go b/test/e2e/work_workload_test.go index a016e861a..adefc5f7b 100644 --- a/test/e2e/work_workload_test.go +++ b/test/e2e/work_workload_test.go @@ -157,7 +157,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") ginkgo.AfterEach(func() { ginkgo.By(fmt.Sprintf("delete manifestwork %v/%v", universalClusterName, workName)) - gomega.Expect(t.cleanManifestWorks(universalClusterName, workName)).To(gomega.BeNil()) + gomega.Expect(hub.CleanManifestWorks(universalClusterName, workName)).To(gomega.BeNil()) }) ginkgo.Context("Work CRUD", func() { @@ -171,24 +171,24 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") // create ns2 ns := &corev1.Namespace{} ns.Name = ns2 - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + _, err = spoke.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { // remove finalizer from cm3 if necessary - cm3, err := t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) + cm3, err := spoke.KubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) if err == nil { cm3.Finalizers = nil - err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Delete(context.Background(), "cm3", metav1.DeleteOptions{}) + err = spoke.KubeClient.CoreV1().ConfigMaps(ns2).Delete(context.Background(), "cm3", metav1.DeleteOptions{}) } if err != nil { gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) } // delete ns2 - err = t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), ns2, metav1.DeleteOptions{}) + err = spoke.KubeClient.CoreV1().Namespaces().Delete(context.Background(), ns2, metav1.DeleteOptions{}) if err != nil { gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) } @@ -205,27 +205,27 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") util.NewConfigmap(ns2, "cm3", nil, cmFinalizers), } work := newManifestWork(universalClusterName, workName, objects...) 
- work, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // check if resources are applied for manifests gomega.Eventually(func() error { - _, err := t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err := spoke.KubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) if err != nil { return err } - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) if err != nil { return err } - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) if err != nil { return err } - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) @@ -240,7 +240,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") // get the corresponding AppliedManifestWork var appliedManifestWork *workapiv1.AppliedManifestWork gomega.Eventually(func() error { - appliedManifestWorkList, err := t.SpokeWorkClient.WorkV1().AppliedManifestWorks().List(context.Background(), metav1.ListOptions{}) + appliedManifestWorkList, err := spoke.WorkClient.WorkV1().AppliedManifestWorks().List(context.Background(), metav1.ListOptions{}) if err != nil { return err } @@ -285,19 +285,19 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") } newWork := newManifestWork(universalClusterName, workName, newObjects...) 
gomega.Eventually(func() error { - work, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + work, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) if err != nil { return err } work.Spec.Workload.Manifests = newWork.Spec.Workload.Manifests - work, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + work, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) return err }).Should(gomega.Succeed()) // check if cm1 is removed from applied resources list in status gomega.Eventually(func() error { - appliedManifestWork, err = t.SpokeWorkClient.WorkV1().AppliedManifestWorks().Get( + appliedManifestWork, err = spoke.WorkClient.WorkV1().AppliedManifestWorks().Get( context.Background(), appliedManifestWork.Name, metav1.GetOptions{}) if err != nil { return err @@ -316,12 +316,12 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") }).ShouldNot(gomega.HaveOccurred()) // check if cm1 is deleted - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) // check if cm3 is updated gomega.Eventually(func() error { - cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) + cm, err := spoke.KubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) if err != nil { return err } @@ -334,41 +334,41 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("delete manifestwork") - err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) + err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // remove finalizer from cm3 in 2 seconds timer := time.NewTimer(2 * time.Second) go func() { <-timer.C - cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) + cm, err := spoke.KubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) if err == nil { cm.Finalizers = nil - _, _ = t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Update(context.Background(), cm, metav1.UpdateOptions{}) + _, _ = spoke.KubeClient.CoreV1().ConfigMaps(ns2).Update(context.Background(), cm, metav1.UpdateOptions{}) } }() // wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) return errors.IsNotFound(err) }).Should(gomega.BeTrue()) // Once manifest work is deleted, its corresponding appliedManifestWorks should be deleted as well - _, err = t.SpokeWorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWork.Name, metav1.GetOptions{}) + _, err = 
spoke.WorkClient.WorkV1().AppliedManifestWorks().Get(context.Background(), appliedManifestWork.Name, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) // Once manifest work is deleted, all applied resources should have already been deleted too - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(ns2).Get(context.Background(), "cm3", metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) - err = t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), ns2, metav1.DeleteOptions{}) + err = spoke.KubeClient.CoreV1().Namespaces().Delete(context.Background(), ns2, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) }) @@ -386,12 +386,12 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") newJob(jobName), } work := newManifestWork(universalClusterName, workName, objects...) - work, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + work, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // check status conditions in manifestwork status gomega.Eventually(func() error { - work, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + work, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) if err != nil { return err } @@ -408,7 +408,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") // Ensure pod is created gomega.Eventually(func() error { - pods, err := t.SpokeKubeClient.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{}) + pods, err := spoke.KubeClient.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{}) if err != nil { return err } @@ -421,12 +421,12 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("delete manifestwork") - err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) + err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // pods should be all cleaned. 
gomega.Eventually(func() error { - pods, err := t.SpokeKubeClient.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{}) + pods, err := spoke.KubeClient.CoreV1().Pods("default").List(context.Background(), metav1.ListOptions{}) if err != nil { return err } @@ -449,13 +449,13 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") // create namespace for cr ns := &corev1.Namespace{} ns.Name = crNamespace - _, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) + _, err = spoke.KubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { // delete namespace for cr - err = t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), crNamespace, metav1.DeleteOptions{}) + err = spoke.KubeClient.CoreV1().Namespaces().Delete(context.Background(), crNamespace, metav1.DeleteOptions{}) if err != nil { gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) } @@ -473,7 +473,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") objects := []runtime.Object{crd, clusterRole, cr} work := newManifestWork(universalClusterName, workName, objects...) - _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // check status conditions in manifestwork status @@ -485,7 +485,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") // Upgrade crd/cr and check if cr resource is recreated. // Get UID of cr resource at first. - guestbook, err := t.SpokeDynamicClient.Resource(schema.GroupVersionResource{ + guestbook, err := spoke.DynamicClient.Resource(schema.GroupVersionResource{ Resource: "guestbooks", Version: "v1", Group: "my.domain", @@ -500,15 +500,15 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") work = newManifestWork(universalClusterName, workName, objects...) 
// Update work - existingWork, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + existingWork, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) work.ResourceVersion = existingWork.ResourceVersion - _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Update(context.Background(), work, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // check if v2 cr is applied gomega.Eventually(func() error { - guestbook, err := t.SpokeDynamicClient.Resource(schema.GroupVersionResource{ + guestbook, err := spoke.DynamicClient.Resource(schema.GroupVersionResource{ Resource: "guestbooks", Version: "v2", Group: "my.domain", @@ -558,12 +558,12 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") }, }, } - _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // Check deployment status gomega.Eventually(func() error { - work, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) + work, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(context.Background(), workName, metav1.GetOptions{}) if err != nil { return err } @@ -625,20 +625,20 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") cmName = "cm1" ns := &corev1.Namespace{} ns.Name = nsName - _, err := t.SpokeKubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) + _, err := spoke.KubeClient.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) objects := []runtime.Object{ util.NewConfigmap(nsName, cmName, nil, nil), } work := newManifestWork(universalClusterName, workName, objects...) - _, err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create( + _, err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create( context.Background(), work, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) ginkgo.AfterEach(func() { - err := t.SpokeKubeClient.CoreV1().Namespaces().Delete(ctx, nsName, metav1.DeleteOptions{}) + err := spoke.KubeClient.CoreV1().Namespaces().Delete(ctx, nsName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) @@ -649,13 +649,13 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") util.NewConfigmap(nsName, cmName, nil, nil), } work2 := newManifestWork(universalClusterName, work2Name, objects...) 
- _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Create(ctx, work2, metav1.CreateOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Create(ctx, work2, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) for _, name := range []string{workName, work2Name} { // check status conditions in manifestwork status gomega.Eventually(func() error { - work, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get( + work, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get( ctx, name, metav1.GetOptions{}) if err != nil { return err @@ -670,7 +670,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") cmUID := types.UID("test") // check if resources are applied for manifests gomega.Eventually(func() error { - cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + cm, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) if err != nil { return err } @@ -683,30 +683,30 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("delete manifestwork mw1") - err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(ctx, workName, metav1.DeleteOptions{}) + err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(ctx, workName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(ctx, workName, metav1.GetOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(ctx, workName, metav1.GetOptions{}) return errors.IsNotFound(err) }).Should(gomega.BeTrue()) - cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + cm, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(cm.UID).To(gomega.Equal(cmUID)) ginkgo.By("delete manifestwork mw2") - err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(ctx, work2Name, metav1.DeleteOptions{}) + err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(ctx, work2Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) // wait for deletion of manifest work gomega.Eventually(func() bool { - _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(ctx, work2Name, metav1.GetOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(ctx, work2Name, metav1.GetOptions{}) return errors.IsNotFound(err) }).Should(gomega.BeTrue()) - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue()) }) @@ -718,16 +718,16 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") }).ShouldNot(gomega.HaveOccurred()) ginkgo.By("check if resources are applied for manifests") - _, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + _, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("Add a non-appliedManifestWork 
owner to the applied resource") - cmOwner, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Create(ctx, + cmOwner, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Create(ctx, util.NewConfigmap(nsName, "owner", nil, nil), metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Eventually(func() error { - cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + cm, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) if err != nil { return err } @@ -738,31 +738,31 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") APIVersion: "v1", }) - _, err = t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Update(ctx, cm, metav1.UpdateOptions{}) + _, err = spoke.KubeClient.CoreV1().ConfigMaps(nsName).Update(ctx, cm, metav1.UpdateOptions{}) return err }).ShouldNot(gomega.HaveOccurred()) - cm, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + cm, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(len(cm.OwnerReferences) == 2).To(gomega.BeTrue()) ginkgo.By("delete manifestwork mw1") - err = t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(ctx, workName, metav1.DeleteOptions{}) + err = hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Delete(ctx, workName, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) ginkgo.By("wait for deletion of manifest work") gomega.Eventually(func() bool { - _, err := t.HubWorkClient.WorkV1().ManifestWorks(universalClusterName).Get(ctx, workName, metav1.GetOptions{}) + _, err := hub.WorkClient.WorkV1().ManifestWorks(universalClusterName).Get(ctx, workName, metav1.GetOptions{}) return errors.IsNotFound(err) }).Should(gomega.BeTrue()) ginkgo.By("check the resource cm was deleted successfully") gomega.Eventually(func() bool { - _, err := t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) + _, err := spoke.KubeClient.CoreV1().ConfigMaps(nsName).Get(ctx, cmName, metav1.GetOptions{}) return errors.IsNotFound(err) }).Should(gomega.BeTrue()) - err = t.SpokeKubeClient.CoreV1().ConfigMaps(nsName).Delete(ctx, cmOwner.Name, metav1.DeleteOptions{}) + err = spoke.KubeClient.CoreV1().ConfigMaps(nsName).Delete(ctx, cmOwner.Name, metav1.DeleteOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) }) }) @@ -770,7 +770,7 @@ var _ = ginkgo.Describe("Work agent", ginkgo.Label("work-agent", "sanity-check") func assertManifestWorkAppliedSuccessfully(workNamespace, workName string, expectedManifestStatuses []metav1.ConditionStatus) error { - work, err := t.HubWorkClient.WorkV1().ManifestWorks(workNamespace).Get( + work, err := hub.WorkClient.WorkV1().ManifestWorks(workNamespace).Get( context.Background(), workName, metav1.GetOptions{}) if err != nil { return err diff --git a/test/framework/clusterclient.go b/test/framework/clusterclient.go new file mode 100644 index 000000000..fd5cb1287 --- /dev/null +++ b/test/framework/clusterclient.go @@ -0,0 +1,157 @@ +package framework + +import ( + "context" + "fmt" + + authv1 "k8s.io/api/authentication/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + "k8s.io/utils/pointer" + + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" +) + 
+func (hub *Hub) BuildClusterClient(saNamespace, saName string, clusterPolicyRules, policyRules []rbacv1.PolicyRule) (clusterclient.Interface, error) { + var err error + + sa := &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: saNamespace, + Name: saName, + }, + } + _, err = hub.KubeClient.CoreV1().ServiceAccounts(saNamespace).Create(context.TODO(), sa, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + // create cluster role/rolebinding + if len(clusterPolicyRules) > 0 { + clusterRoleName := fmt.Sprintf("%s-clusterrole", saName) + clusterRole := &rbacv1.ClusterRole{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterRoleName, + }, + Rules: clusterPolicyRules, + } + _, err = hub.KubeClient.RbacV1().ClusterRoles().Create(context.TODO(), clusterRole, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + clusterRoleBinding := &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: fmt.Sprintf("%s-clusterrolebinding", saName), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Namespace: saNamespace, + Name: saName, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: clusterRoleName, + }, + } + _, err = hub.KubeClient.RbacV1().ClusterRoleBindings().Create(context.TODO(), clusterRoleBinding, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } + + // create cluster role/rolebinding + if len(policyRules) > 0 { + roleName := fmt.Sprintf("%s-role", saName) + role := &rbacv1.Role{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: saNamespace, + Name: roleName, + }, + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"cluster.open-cluster-management.io"}, + Resources: []string{"managedclustersetbindings"}, + Verbs: []string{"create", "get", "update"}, + }, + }, + } + _, err = hub.KubeClient.RbacV1().Roles(saNamespace).Create(context.TODO(), role, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + + roleBinding := &rbacv1.RoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: saNamespace, + Name: fmt.Sprintf("%s-rolebinding", saName), + }, + Subjects: []rbacv1.Subject{ + { + Kind: "ServiceAccount", + Namespace: saNamespace, + Name: saName, + }, + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "Role", + Name: roleName, + }, + } + _, err = hub.KubeClient.RbacV1().RoleBindings(saNamespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}) + if err != nil { + return nil, err + } + } + + tokenRequest, err := hub.KubeClient.CoreV1().ServiceAccounts(saNamespace).CreateToken( + context.TODO(), + saName, + &authv1.TokenRequest{ + Spec: authv1.TokenRequestSpec{ + ExpirationSeconds: pointer.Int64(8640 * 3600), + }, + }, + metav1.CreateOptions{}, + ) + if err != nil { + return nil, err + } + + unauthorizedClusterClient, err := clusterclient.NewForConfig(&rest.Config{ + Host: hub.ClusterCfg.Host, + TLSClientConfig: rest.TLSClientConfig{ + CAData: hub.ClusterCfg.CAData, + }, + BearerToken: tokenRequest.Status.Token, + }) + return unauthorizedClusterClient, err +} + +func (hub *Hub) CleanupClusterClient(saNamespace, saName string) error { + err := hub.KubeClient.CoreV1().ServiceAccounts(saNamespace).Delete(context.TODO(), saName, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("delete sa %q/%q failed: %v", saNamespace, saName, err) + } + + // delete cluster role and cluster role binding if exists + clusterRoleName := fmt.Sprintf("%s-clusterrole", saName) + err = 
hub.KubeClient.RbacV1().ClusterRoles().Delete(context.TODO(), clusterRoleName, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("delete cluster role %q failed: %v", clusterRoleName, err) + } + clusterRoleBindingName := fmt.Sprintf("%s-clusterrolebinding", saName) + err = hub.KubeClient.RbacV1().ClusterRoleBindings().Delete(context.TODO(), clusterRoleBindingName, metav1.DeleteOptions{}) + if err != nil && !apierrors.IsNotFound(err) { + return fmt.Errorf("delete cluster role binding %q failed: %v", clusterRoleBindingName, err) + } + + return nil +} diff --git a/test/framework/clustermanager.go b/test/framework/clustermanager.go new file mode 100644 index 000000000..01a9246b6 --- /dev/null +++ b/test/framework/clustermanager.go @@ -0,0 +1,34 @@ +package framework + +import ( + "context" + "fmt" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + operatorapiv1 "open-cluster-management.io/api/operator/v1" +) + +func (hub *Hub) GetCluserManager() (*operatorapiv1.ClusterManager, error) { + return hub.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), hub.ClusterManagerName, metav1.GetOptions{}) +} + +func CheckClusterManagerStatus(cm *operatorapiv1.ClusterManager) error { + if meta.IsStatusConditionFalse(cm.Status.Conditions, "Applied") { + return fmt.Errorf("components of cluster manager are not all applied") + } + if meta.IsStatusConditionFalse(cm.Status.Conditions, "ValidFeatureGates") { + return fmt.Errorf("feature gates are not all valid") + } + if !meta.IsStatusConditionFalse(cm.Status.Conditions, "HubRegistrationDegraded") { + return fmt.Errorf("HubRegistration is degraded") + } + if !meta.IsStatusConditionFalse(cm.Status.Conditions, "HubPlacementDegraded") { + return fmt.Errorf("HubPlacement is degraded") + } + if !meta.IsStatusConditionFalse(cm.Status.Conditions, "Progressing") { + return fmt.Errorf("ClusterManager is still progressing") + } + return nil +} diff --git a/test/framework/common.go b/test/framework/common.go new file mode 100644 index 000000000..26a20470f --- /dev/null +++ b/test/framework/common.go @@ -0,0 +1,93 @@ +package framework + +import ( + "fmt" + + apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" + + addonclient "open-cluster-management.io/api/client/addon/clientset/versioned" + clusterclient "open-cluster-management.io/api/client/cluster/clientset/versioned" + operatorclient "open-cluster-management.io/api/client/operator/clientset/versioned" + workv1client "open-cluster-management.io/api/client/work/clientset/versioned" +) + +type Images struct { + RegistrationImage string + WorkImage string + SingletonImage string +} + +// OCMClients contains every kind of client that we need to interact with ocm components +type OCMClients struct { + KubeClient kubernetes.Interface + APIExtensionsClient apiextensionsclient.Interface + OperatorClient operatorclient.Interface + ClusterClient clusterclient.Interface + WorkClient workv1client.Interface + AddonClient addonclient.Interface + DynamicClient dynamic.Interface + RestMapper meta.RESTMapper +} + +func NewOCMClients(clusterCfg *rest.Config) (*OCMClients, error) { + kubeClient, err := kubernetes.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster client: %w", err) + } + + 
httpClient, err := rest.HTTPClientFor(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster http client: %w", err) + } + + restMapper, err := apiutil.NewDynamicRESTMapper(clusterCfg, httpClient) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster rest mapper: %w", err) + } + + dynamicClient, err := dynamic.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster dynamic client: %w", err) + } + + apiExtensionsClient, err := apiextensionsclient.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster api extensions client: %w", err) + } + + operatorClient, err := operatorclient.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster operator client: %w", err) + } + + clusterClient, err := clusterclient.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster cluster client: %w", err) + } + + workClient, err := workv1client.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster work client: %w", err) + } + + addonClient, err := addonclient.NewForConfig(clusterCfg) + if err != nil { + return nil, fmt.Errorf("failed to create managed cluster addon client: %w", err) + } + + return &OCMClients{ + KubeClient: kubeClient, + APIExtensionsClient: apiExtensionsClient, + OperatorClient: operatorClient, + ClusterClient: clusterClient, + WorkClient: workClient, + AddonClient: addonClient, + DynamicClient: dynamicClient, + RestMapper: restMapper, + }, nil +} diff --git a/test/framework/deployment.go b/test/framework/deployment.go new file mode 100644 index 000000000..7a3d9ca29 --- /dev/null +++ b/test/framework/deployment.go @@ -0,0 +1,22 @@ +package framework + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +func CheckDeploymentReady(ctx context.Context, kubeClient kubernetes.Interface, namespace, name string) error { + deployment, err := kubeClient.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get deployment %s: %w", name, err) + } + + if deployment.Status.ReadyReplicas != deployment.Status.Replicas { + return fmt.Errorf("deployment %s is not ready, ready replicas: %d, replicas: %d", name, deployment.Status.ReadyReplicas, deployment.Status.Replicas) + } + + return nil +} diff --git a/test/framework/featuregate.go b/test/framework/featuregate.go new file mode 100644 index 000000000..322aaf93b --- /dev/null +++ b/test/framework/featuregate.go @@ -0,0 +1,78 @@ +package framework + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + ocmfeature "open-cluster-management.io/api/feature" + operatorapiv1 "open-cluster-management.io/api/operator/v1" +) + +func (hub *Hub) EnableHubWorkFeature(feature string) error { + cm, err := hub.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), hub.ClusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + + if cm.Spec.WorkConfiguration == nil { + cm.Spec.WorkConfiguration = &operatorapiv1.WorkConfiguration{} + } + + if len(cm.Spec.WorkConfiguration.FeatureGates) == 0 { + cm.Spec.WorkConfiguration.FeatureGates = make([]operatorapiv1.FeatureGate, 0) + } + + for idx, f := range cm.Spec.WorkConfiguration.FeatureGates { + if f.Feature == feature { + if f.Mode == 
operatorapiv1.FeatureGateModeTypeEnable { + return nil + } + cm.Spec.WorkConfiguration.FeatureGates[idx].Mode = operatorapiv1.FeatureGateModeTypeEnable + _, err = hub.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), cm, metav1.UpdateOptions{}) + return err + } + } + + featureGate := operatorapiv1.FeatureGate{ + Feature: feature, + Mode: operatorapiv1.FeatureGateModeTypeEnable, + } + + cm.Spec.WorkConfiguration.FeatureGates = append(cm.Spec.WorkConfiguration.FeatureGates, featureGate) + _, err = hub.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), cm, metav1.UpdateOptions{}) + return err +} + +func (hub *Hub) RemoveHubWorkFeature(feature string) error { + clusterManager, err := hub.OperatorClient.OperatorV1().ClusterManagers().Get(context.TODO(), hub.ClusterManagerName, metav1.GetOptions{}) + if err != nil { + return err + } + for indx, fg := range clusterManager.Spec.WorkConfiguration.FeatureGates { + if fg.Feature == feature { + clusterManager.Spec.WorkConfiguration.FeatureGates[indx].Mode = operatorapiv1.FeatureGateModeTypeDisable + break + } + } + _, err = hub.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), clusterManager, metav1.UpdateOptions{}) + return err +} + +func (hub *Hub) EnableAutoApprove(users []string) error { + cm, err := hub.GetCluserManager() + if err != nil { + return fmt.Errorf("failed to get cluster manager: %w", err) + } + if cm.Spec.RegistrationConfiguration == nil { + cm.Spec.RegistrationConfiguration = &operatorapiv1.RegistrationHubConfiguration{} + } + cm.Spec.RegistrationConfiguration.FeatureGates = append(cm.Spec.RegistrationConfiguration.FeatureGates, operatorapiv1.FeatureGate{ + Feature: string(ocmfeature.ManagedClusterAutoApproval), + Mode: operatorapiv1.FeatureGateModeTypeEnable, + }) + cm.Spec.RegistrationConfiguration.AutoApproveUsers = users + _, err = hub.OperatorClient.OperatorV1().ClusterManagers().Update(context.TODO(), cm, metav1.UpdateOptions{}) + return err +} diff --git a/test/framework/hub.go b/test/framework/hub.go new file mode 100644 index 000000000..db117672e --- /dev/null +++ b/test/framework/hub.go @@ -0,0 +1,95 @@ +package framework + +import ( + "context" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/rest" + clientcmd "k8s.io/client-go/tools/clientcmd" + + ocmfeature "open-cluster-management.io/api/feature" + + "open-cluster-management.io/ocm/pkg/operator/helpers" +) + +// Hub represents a hub cluster, it holds: +// * the clients to interact with the hub cluster +// * the metadata of the hub +// * the runtime data of the hub +type Hub struct { + *OCMClients + ClusterManagerName string + ClusterManagerNamespace string + ClusterCfg *rest.Config +} + +func NewHub(kubeconfig string) (*Hub, error) { + clusterCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig) + if err != nil { + return nil, err + } + clients, err := NewOCMClients(clusterCfg) + if err != nil { + return nil, err + } + return &Hub{ + OCMClients: clients, + // the name of the ClusterManager object is constantly "cluster-manager" at the moment; The same name as deploy/cluster-manager/config/samples + ClusterManagerName: "cluster-manager", + ClusterManagerNamespace: helpers.ClusterManagerDefaultNamespace, + ClusterCfg: clusterCfg, + }, nil +} + +func (hub *Hub) CheckHubReady() error { + ctx := context.TODO() + + cm, err := hub.GetCluserManager() + if err != nil { + return fmt.Errorf("failed to get cluster manager: %w", err) + } + + err = CheckClusterManagerStatus(cm) + if err != nil { 
+ return fmt.Errorf("failed to check cluster manager status: %w", err) + } + + // make sure open-cluster-management-hub namespace is created + if _, err := hub.KubeClient.CoreV1().Namespaces(). + Get(context.TODO(), hub.ClusterManagerNamespace, metav1.GetOptions{}); err != nil { + return err + } + + // make sure deployments are ready + deployments := []string{ + fmt.Sprintf("%s-registration-controller", hub.ClusterManagerName), + fmt.Sprintf("%s-registration-webhook", hub.ClusterManagerName), + fmt.Sprintf("%s-work-webhook", hub.ClusterManagerName), + fmt.Sprintf("%s-placement-controller", hub.ClusterManagerName), + } + for _, deployment := range deployments { + if err = CheckDeploymentReady(ctx, hub.KubeClient, hub.ClusterManagerNamespace, deployment); err != nil { + return fmt.Errorf("failed to check deployment %s: %w", deployment, err) + } + } + + // if manifestworkreplicaset feature is enabled, check the work controller + if cm.Spec.WorkConfiguration != nil && + helpers.FeatureGateEnabled(cm.Spec.WorkConfiguration.FeatureGates, ocmfeature.DefaultHubWorkFeatureGates, ocmfeature.ManifestWorkReplicaSet) { + if err = CheckDeploymentReady(ctx, hub.KubeClient, hub.ClusterManagerNamespace, fmt.Sprintf("%s-work-controller", hub.ClusterManagerName)); err != nil { + return fmt.Errorf("failed to check work controller: %w", err) + } + } + + // if addonManager feature is enabled, check the addonManager controller + if cm.Spec.AddOnManagerConfiguration != nil && + helpers.FeatureGateEnabled(cm.Spec.AddOnManagerConfiguration.FeatureGates, ocmfeature.DefaultHubAddonManagerFeatureGates, ocmfeature.AddonManagement) { + if err = CheckDeploymentReady(ctx, hub.KubeClient, hub.ClusterManagerNamespace, + fmt.Sprintf("%s-addon-manager-controller", hub.ClusterManagerName)); err != nil { + return fmt.Errorf("failed to check addon manager controller: %w", err) + } + } + + return nil +} diff --git a/test/framework/klusterlet.go b/test/framework/klusterlet.go new file mode 100644 index 000000000..ac3fb4b12 --- /dev/null +++ b/test/framework/klusterlet.go @@ -0,0 +1,257 @@ +package framework + +import ( + "context" + "fmt" + + . 
"github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/klog/v2" + + operatorapiv1 "open-cluster-management.io/api/operator/v1" + + "open-cluster-management.io/ocm/pkg/operator/helpers" +) + +// CreateAndApproveKlusterlet requires operations on both hub side and spoke side +func CreateAndApproveKlusterlet( + hub *Hub, spoke *Spoke, + klusterletName, managedClusterName, klusterletNamespace string, + mode operatorapiv1.InstallMode, + bootstrapHubKubeConfigSecret *corev1.Secret, + images Images, +) { + // on the spoke side + _, err := spoke.CreateKlusterlet( + klusterletName, + managedClusterName, + klusterletNamespace, + mode, + bootstrapHubKubeConfigSecret, + images, + ) + Expect(err).ToNot(HaveOccurred()) + + // on the hub side + Eventually(func() error { + _, err := hub.GetManagedCluster(managedClusterName) + return err + }).Should(Succeed()) + + Eventually(func() error { + return hub.ApproveManagedClusterCSR(managedClusterName) + }).Should(Succeed()) + + Eventually(func() error { + return hub.AcceptManageCluster(managedClusterName) + }).Should(Succeed()) + + Eventually(func() error { + return hub.CheckManagedClusterStatus(managedClusterName) + }).Should(Succeed()) +} + +func (spoke *Spoke) CreateKlusterlet( + name, clusterName, klusterletNamespace string, + mode operatorapiv1.InstallMode, + bootstrapHubKubeConfigSecret *corev1.Secret, + images Images) (*operatorapiv1.Klusterlet, error) { + if name == "" { + return nil, fmt.Errorf("the name should not be null") + } + if klusterletNamespace == "" { + klusterletNamespace = helpers.KlusterletDefaultNamespace + } + + var klusterlet = &operatorapiv1.Klusterlet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: operatorapiv1.KlusterletSpec{ + RegistrationImagePullSpec: images.RegistrationImage, + WorkImagePullSpec: images.WorkImage, + ImagePullSpec: images.SingletonImage, + ExternalServerURLs: []operatorapiv1.ServerURL{ + { + URL: "https://localhost", + }, + }, + ClusterName: clusterName, + Namespace: klusterletNamespace, + DeployOption: operatorapiv1.KlusterletDeployOption{ + Mode: mode, + }, + }, + } + + agentNamespace := helpers.AgentNamespace(klusterlet) + klog.Infof("klusterlet: %s/%s, \t mode: %v, \t agent namespace: %s", klusterlet.Name, klusterlet.Namespace, mode, agentNamespace) + + // create agentNamespace + namespace := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: agentNamespace, + Annotations: map[string]string{ + "workload.openshift.io/allowed": "management", + }, + }, + } + if _, err := spoke.KubeClient.CoreV1().Namespaces().Get(context.TODO(), agentNamespace, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + klog.Errorf("failed to get ns %v. %v", agentNamespace, err) + return nil, err + } + + if _, err := spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), + namespace, metav1.CreateOptions{}); err != nil { + klog.Errorf("failed to create ns %v. %v", namespace, err) + return nil, err + } + } + + // create bootstrap-hub-kubeconfig secret + secret := bootstrapHubKubeConfigSecret.DeepCopy() + if _, err := spoke.KubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + klog.Errorf("failed to get secret %v in ns %v. 
%v", secret.Name, agentNamespace, err) + return nil, err + } + if _, err = spoke.KubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + klog.Errorf("failed to create secret %v in ns %v. %v", secret, agentNamespace, err) + return nil, err + } + } + + if helpers.IsHosted(mode) { + // create external-managed-kubeconfig, will use the same cluster to simulate the Hosted mode. + secret.Namespace = agentNamespace + secret.Name = helpers.ExternalManagedKubeConfig + if _, err := spoke.KubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), secret.Name, metav1.GetOptions{}); err != nil { + if !apierrors.IsNotFound(err) { + klog.Errorf("failed to get secret %v in ns %v. %v", secret.Name, agentNamespace, err) + return nil, err + } + if _, err = spoke.KubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil { + klog.Errorf("failed to create secret %v in ns %v. %v", secret, agentNamespace, err) + return nil, err + } + } + } + + // create klusterlet CR + realKlusterlet, err := spoke.OperatorClient.OperatorV1().Klusterlets().Create(context.TODO(), + klusterlet, metav1.CreateOptions{}) + if err != nil && !apierrors.IsAlreadyExists(err) { + klog.Errorf("failed to create klusterlet %v . %v", klusterlet.Name, err) + return nil, err + } + + return realKlusterlet, nil +} + +func (spoke *Spoke) CreatePureHostedKlusterlet(name, clusterName string) (*operatorapiv1.Klusterlet, error) { + if name == "" { + return nil, fmt.Errorf("the name should not be null") + } + + var klusterlet = &operatorapiv1.Klusterlet{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: operatorapiv1.KlusterletSpec{ + RegistrationImagePullSpec: "quay.io/open-cluster-management/registration:latest", + WorkImagePullSpec: "quay.io/open-cluster-management/work:latest", + ExternalServerURLs: []operatorapiv1.ServerURL{ + { + URL: "https://localhost", + }, + }, + ClusterName: clusterName, + DeployOption: operatorapiv1.KlusterletDeployOption{ + Mode: operatorapiv1.InstallModeHosted, + }, + }, + } + + // create klusterlet CR + realKlusterlet, err := spoke.OperatorClient.OperatorV1().Klusterlets().Create(context.TODO(), + klusterlet, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("failed to create klusterlet %v . 
%v", klusterlet.Name, err) + return nil, err + } + + return realKlusterlet, nil +} + +func (spoke *Spoke) CheckKlusterletStatus(klusterletName, condType, reason string, status metav1.ConditionStatus) error { + klusterlet, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) + if err != nil { + return err + } + + cond := meta.FindStatusCondition(klusterlet.Status.Conditions, condType) + if cond == nil { + return fmt.Errorf("cannot find condition type %s", condType) + } + + if cond.Reason != reason { + return fmt.Errorf("condition reason is not matched, expect %s, got %s", reason, cond.Reason) + } + + if cond.Status != status { + return fmt.Errorf("condition status is not matched, expect %s, got %s", status, cond.Status) + } + + return nil +} + +// CleanKlusterletRelatedResources needs both hub side and spoke side operations +func CleanKlusterletRelatedResources( + hub *Hub, spoke *Spoke, + klusterletName, managedClusterName string) { + Expect(klusterletName).NotTo(Equal("")) + + // clean the klusterlet + err := spoke.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return + } + Expect(err).To(BeNil()) + + Eventually(func() error { + _, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.Infof("klusterlet %s deleted successfully", klusterletName) + return nil + } + if err != nil { + klog.Infof("get klusterlet %s error: %v", klusterletName, err) + return err + } + return fmt.Errorf("klusterlet %s still exists", klusterletName) + }).Should(Succeed()) + + // clean the managed clusters + err = hub.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), managedClusterName, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return + } + Expect(err).To(BeNil()) + + Eventually(func() error { + _, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), managedClusterName, metav1.GetOptions{}) + if apierrors.IsNotFound(err) { + klog.Infof("managed cluster %s deleted successfully", managedClusterName) + return nil + } + if err != nil { + klog.Infof("get managed cluster %s error: %v", klusterletName, err) + return err + } + return fmt.Errorf("managed cluster %s still exists", managedClusterName) + }).Should(Succeed()) +} diff --git a/test/framework/kubeconfig.go b/test/framework/kubeconfig.go new file mode 100644 index 000000000..16ad50d64 --- /dev/null +++ b/test/framework/kubeconfig.go @@ -0,0 +1,94 @@ +package framework + +import ( + "context" + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + clientcmd "k8s.io/client-go/tools/clientcmd" + "k8s.io/klog/v2" + + operatorapiv1 "open-cluster-management.io/api/operator/v1" + + "open-cluster-management.io/ocm/pkg/operator/helpers" +) + +func (spoke *Spoke) DeleteExternalKubeconfigSecret(klusterlet *operatorapiv1.Klusterlet) error { + agentNamespace := helpers.AgentNamespace(klusterlet) + err := spoke.KubeClient.CoreV1().Secrets(agentNamespace).Delete(context.TODO(), + helpers.ExternalManagedKubeConfig, metav1.DeleteOptions{}) + if err != nil { + klog.Errorf("failed to delete external managed secret in ns %v. 
%v", agentNamespace, err) + return err + } + + return nil +} + +func (spoke *Spoke) CreateFakeExternalKubeconfigSecret(klusterlet *operatorapiv1.Klusterlet) error { + agentNamespace := helpers.AgentNamespace(klusterlet) + klog.Infof("klusterlet: %s/%s, \t, \t agent namespace: %s", + klusterlet.Name, klusterlet.Namespace, agentNamespace) + + bsSecret, err := spoke.KubeClient.CoreV1().Secrets(agentNamespace).Get(context.TODO(), + helpers.BootstrapHubKubeConfig, metav1.GetOptions{}) + if err != nil { + klog.Errorf("failed to get bootstrap secret %v in ns %v. %v", bsSecret, agentNamespace, err) + return err + } + + // create external-managed-kubeconfig, will use the same cluster to simulate the Hosted mode. + secret, err := changeHostOfKubeconfigSecret(*bsSecret, "https://kube-apiserver.i-am-a-fake-server:6443") + if err != nil { + klog.Errorf("failed to change host of the kubeconfig secret in. %v", err) + return err + } + secret.Namespace = agentNamespace + secret.Name = helpers.ExternalManagedKubeConfig + secret.ResourceVersion = "" + + _, err = spoke.KubeClient.CoreV1().Secrets(agentNamespace).Create(context.TODO(), secret, metav1.CreateOptions{}) + if err != nil { + klog.Errorf("failed to create external managed secret %v in ns %v. %v", bsSecret, agentNamespace, err) + return err + } + + return nil +} + +func changeHostOfKubeconfigSecret(secret corev1.Secret, apiServerURL string) (*corev1.Secret, error) { + kubeconfigData, ok := secret.Data["kubeconfig"] + if !ok { + return nil, fmt.Errorf("kubeconfig not found") + } + + if kubeconfigData == nil { + return nil, fmt.Errorf("failed to get kubeconfig from secret: %s", secret.GetName()) + } + + kubeconfig, err := clientcmd.Load(kubeconfigData) + if err != nil { + return nil, fmt.Errorf("failed to load kubeconfig from secret: %s", secret.GetName()) + } + + if len(kubeconfig.Clusters) == 0 { + return nil, fmt.Errorf("there is no cluster in kubeconfig from secret: %s", secret.GetName()) + } + + for k := range kubeconfig.Clusters { + kubeconfig.Clusters[k].Server = apiServerURL + } + + newKubeconfig, err := clientcmd.Write(*kubeconfig) + if err != nil { + return nil, fmt.Errorf("failed to write new kubeconfig to secret: %s", secret.GetName()) + } + + secret.Data = map[string][]byte{ + "kubeconfig": newKubeconfig, + } + + klog.Infof("Set the cluster server URL in %s secret with apiServerURL %s", secret.Name, apiServerURL) + return &secret, nil +} diff --git a/test/framework/managedcluster.go b/test/framework/managedcluster.go new file mode 100644 index 000000000..bb662575d --- /dev/null +++ b/test/framework/managedcluster.go @@ -0,0 +1,190 @@ +package framework + +import ( + "context" + "fmt" + + . 
"github.com/onsi/gomega" + certificatesv1 "k8s.io/api/certificates/v1" + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "open-cluster-management.io/api/cluster/v1" +) + +func (hub *Hub) GetManagedCluster(clusterName string) (*clusterv1.ManagedCluster, error) { + if clusterName == "" { + return nil, fmt.Errorf("the name of managedcluster should not be null") + } + return hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) +} + +func (hub *Hub) CheckManagedClusterStatus(clusterName string) error { + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), + clusterName, metav1.GetOptions{}) + if err != nil { + return err + } + + var okCount = 0 + for _, condition := range managedCluster.Status.Conditions { + if (condition.Type == clusterv1.ManagedClusterConditionHubAccepted || + condition.Type == clusterv1.ManagedClusterConditionJoined || + condition.Type == clusterv1.ManagedClusterConditionAvailable) && + condition.Status == metav1.ConditionTrue { + okCount++ + } + } + + if okCount == 3 { + return nil + } + + return fmt.Errorf("cluster %s condtions are not ready: %v", clusterName, managedCluster.Status.Conditions) +} + +func (hub *Hub) CheckManagedClusterStatusConditions(clusterName string, + expectedConditions map[string]metav1.ConditionStatus) error { + if clusterName == "" { + return fmt.Errorf("the name of managedcluster should not be null") + } + + cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + if err != nil { + return err + } + + // expect the managed cluster to be not available + for conditionType, conditionStatus := range expectedConditions { + condition := meta.FindStatusCondition(cluster.Status.Conditions, conditionType) + if condition == nil { + return fmt.Errorf("managed cluster %s is not in expected status, expect %s to be %s, but not found", + clusterName, conditionType, conditionStatus) + } + if condition.Status != conditionStatus { + return fmt.Errorf("managed cluster %s is not in expected status, expect %s to be %s, but got %s", + clusterName, conditionType, conditionStatus, condition.Status) + } + } + + return nil +} + +func (hub *Hub) DeleteManageClusterAndRelatedNamespace(clusterName string) error { + Eventually(func() error { + err := hub.ClusterClient.ClusterV1().ManagedClusters().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + return err + }).Should(Succeed()) + + // delete namespace created by hub automatically + Eventually(func() error { + err := hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), clusterName, metav1.DeleteOptions{}) + // some managed cluster just created, but the csr is not approved, + // so there is not a related namespace + if apierrors.IsNotFound(err) { + return nil + } + return err + }).Should(Succeed()) + + return nil +} + +func (hub *Hub) AcceptManageCluster(clusterName string) error { + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), + clusterName, metav1.GetOptions{}) + if err != nil { + return err + } + + managedCluster.Spec.HubAcceptsClient = true + managedCluster.Spec.LeaseDurationSeconds = 5 + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), + managedCluster, metav1.UpdateOptions{}) + return err +} + +func (hub 
*Hub) ApproveManagedClusterCSR(clusterName string) error { + var csrs *certificatesv1.CertificateSigningRequestList + var csrClient = hub.KubeClient.CertificatesV1().CertificateSigningRequests() + var err error + + if csrs, err = csrClient.List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("open-cluster-management.io/cluster-name = %v", clusterName)}); err != nil { + return err + } + if len(csrs.Items) == 0 { + return fmt.Errorf("there is no csr related cluster %v", clusterName) + } + + for i := range csrs.Items { + csr := &csrs.Items[i] + if csr, err = csrClient.Get(context.TODO(), csr.Name, metav1.GetOptions{}); err != nil { + return err + } + + if isCSRInTerminalState(&csr.Status) { + continue + } + + csr.Status.Conditions = append(csr.Status.Conditions, certificatesv1.CertificateSigningRequestCondition{ + Type: certificatesv1.CertificateApproved, + Status: corev1.ConditionTrue, + Reason: "Approved by E2E", + Message: "Approved as part of e2e", + }) + _, err = csrClient.UpdateApproval(context.TODO(), csr.Name, csr, metav1.UpdateOptions{}) + if err != nil { + return err + } + } + return nil +} + +func isCSRInTerminalState(status *certificatesv1.CertificateSigningRequestStatus) bool { + for _, c := range status.Conditions { + if c.Type == certificatesv1.CertificateApproved { + return true + } + if c.Type == certificatesv1.CertificateDenied { + return true + } + } + return false +} + +func (hub *Hub) SetHubAcceptsClient(clusterName string, hubAcceptClient bool) error { + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get managed cluster: %w", err) + } + + if managedCluster.Spec.HubAcceptsClient != hubAcceptClient { + managedCluster.Spec.HubAcceptsClient = hubAcceptClient + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update managed cluster: %w", err) + } + } + + return nil +} + +func (hub *Hub) SetLeaseDurationSeconds(clusterName string, leaseDurationSeconds int32) error { + managedCluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{}) + if err != nil { + return fmt.Errorf("failed to get managed cluster: %w", err) + } + + managedCluster.Spec.LeaseDurationSeconds = leaseDurationSeconds + _, err = hub.ClusterClient.ClusterV1().ManagedClusters().Update(context.TODO(), managedCluster, metav1.UpdateOptions{}) + if err != nil { + return fmt.Errorf("failed to update managed cluster: %w", err) + } + return nil +} diff --git a/test/framework/managedclusteraddon.go b/test/framework/managedclusteraddon.go new file mode 100644 index 000000000..cdd35ed44 --- /dev/null +++ b/test/framework/managedclusteraddon.go @@ -0,0 +1,78 @@ +package framework + +import ( + "context" + "fmt" + "time" + + coordv1 "k8s.io/api/coordination/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" +) + +func (hub *Hub) CreateManagedClusterAddOn(managedClusterNamespace, addOnName, installNamespace string) error { + _, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterNamespace).Create( + context.TODO(), + &addonv1alpha1.ManagedClusterAddOn{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: managedClusterNamespace, + Name: addOnName, + }, + Spec: 
addonv1alpha1.ManagedClusterAddOnSpec{ + InstallNamespace: installNamespace, + }, + }, + metav1.CreateOptions{}, + ) + return err +} + +func (hub *Hub) CreateManagedClusterAddOnLease(addOnInstallNamespace, addOnName string) error { + if _, err := hub.KubeClient.CoreV1().Namespaces().Create( + context.TODO(), + &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: addOnInstallNamespace, + }, + }, + metav1.CreateOptions{}, + ); err != nil { + return err + } + + _, err := hub.KubeClient.CoordinationV1().Leases(addOnInstallNamespace).Create( + context.TODO(), + &coordv1.Lease{ + ObjectMeta: metav1.ObjectMeta{ + Name: addOnName, + Namespace: addOnInstallNamespace, + }, + Spec: coordv1.LeaseSpec{ + RenewTime: &metav1.MicroTime{Time: time.Now()}, + }, + }, + metav1.CreateOptions{}, + ) + return err +} + +func (hub *Hub) CheckManagedClusterAddOnStatus(managedClusterNamespace, addOnName string) error { + addOn, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(managedClusterNamespace).Get(context.TODO(), addOnName, metav1.GetOptions{}) + if err != nil { + return err + } + + if addOn.Status.Conditions == nil { + return fmt.Errorf("there is no conditions in addon %v/%v", managedClusterNamespace, addOnName) + } + + if !meta.IsStatusConditionTrue(addOn.Status.Conditions, "Available") { + return fmt.Errorf("the addon %v/%v available condition is not true, %v", + managedClusterNamespace, addOnName, addOn.Status.Conditions) + } + + return nil +} diff --git a/test/framework/manifestwork.go b/test/framework/manifestwork.go new file mode 100644 index 000000000..3fd1a6478 --- /dev/null +++ b/test/framework/manifestwork.go @@ -0,0 +1,26 @@ +package framework + +import ( + "context" + + . "github.com/onsi/gomega" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func (hub *Hub) CleanManifestWorks(clusterName, workName string) error { + err := hub.WorkClient.WorkV1().ManifestWorks(clusterName).Delete(context.Background(), workName, metav1.DeleteOptions{}) + if apierrors.IsNotFound(err) { + return nil + } + if err != nil { + return err + } + + Eventually(func() bool { + _, err := hub.WorkClient.WorkV1().ManifestWorks(clusterName).Get(context.Background(), workName, metav1.GetOptions{}) + return apierrors.IsNotFound(err) + }).Should(BeTrue()) + + return nil +} diff --git a/test/framework/spoke.go b/test/framework/spoke.go new file mode 100644 index 000000000..0d2627342 --- /dev/null +++ b/test/framework/spoke.go @@ -0,0 +1,35 @@ +package framework + +import ( + clientcmd "k8s.io/client-go/tools/clientcmd" +) + +// Spoke represents a spoke cluster, it holds: +// * the clients to interact with the spoke cluster +// * the metadata of the spoke +// * the runtime data of the spoke +type Spoke struct { + *OCMClients + // Note: this is the namespace and name where the KlusterletOperator deployment is created, which + // is different from the klusterlet namespace and name. 
+	KlusterletOperatorNamespace string
+	KlusterletOperator          string
+}
+
+func NewSpoke(kubeconfig string) (*Spoke, error) {
+	clusterCfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if err != nil {
+		return nil, err
+	}
+	clients, err := NewOCMClients(clusterCfg)
+	if err != nil {
+		return nil, err
+	}
+	return &Spoke{
+		OCMClients: clients,
+		// the klusterlet operator Deployment is currently always named "klusterlet",
+		// matching deploy/klusterlet/config/operator/operator.yaml
+		KlusterletOperatorNamespace: "open-cluster-management",
+		KlusterletOperator:          "klusterlet",
+	}, nil
+}
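
Usage note (not part of the patch): the sketch below shows how an e2e test file could compose the new framework helpers — register a klusterlet on the spoke, approve and accept it on the hub via CreateAndApproveKlusterlet, and tear everything down with CleanKlusterletRelatedResources. The helper name, the generated resource names, and the assumption that the suite supplies the hub/spoke handles, the bootstrap kubeconfig secret, and the image references are illustrative only, not taken from this patch.

package e2e

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/rand"

	operatorapiv1 "open-cluster-management.io/api/operator/v1"

	"open-cluster-management.io/ocm/test/framework"
)

// registerKlusterletLifecycleSpecs is a hypothetical helper meant to be called
// while the suite builds its spec tree; hub, spoke, bootstrapSecret and images
// are expected to come from the suite setup.
func registerKlusterletLifecycleSpecs(hub *framework.Hub, spoke *framework.Spoke,
	bootstrapSecret *corev1.Secret, images framework.Images) {
	ginkgo.Describe("Klusterlet lifecycle (sketch)", func() {
		var klusterletName, clusterName string

		ginkgo.BeforeEach(func() {
			suffix := rand.String(6)
			klusterletName = fmt.Sprintf("e2e-klusterlet-%s", suffix)
			clusterName = fmt.Sprintf("e2e-managedcluster-%s", suffix)

			// Create the klusterlet on the spoke, then approve its CSR and
			// accept the ManagedCluster on the hub. The empty klusterlet
			// namespace lets CreateKlusterlet fall back to the default
			// agent namespace.
			framework.CreateAndApproveKlusterlet(
				hub, spoke,
				klusterletName, clusterName, "",
				operatorapiv1.InstallModeDefault,
				bootstrapSecret, images,
			)
		})

		ginkgo.AfterEach(func() {
			// Delete the klusterlet from the spoke and the ManagedCluster from the hub.
			framework.CleanKlusterletRelatedResources(hub, spoke, klusterletName, clusterName)
		})

		ginkgo.It("should report a hub-accepted, joined and available managed cluster", func() {
			gomega.Eventually(func() error {
				return hub.CheckManagedClusterStatus(clusterName)
			}).Should(gomega.Succeed())
		})
	})
}

Passing an empty klusterlet namespace keeps the sketch independent of any particular namespace layout, since CreateKlusterlet substitutes its default when the argument is empty.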
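Also illustrative: the Hosted-mode helpers in kubeconfig.go are typically used together to cut the agents off from the managed cluster. The helper below is hypothetical; it assumes the klusterlet was installed in InstallModeHosted and that the bootstrap-hub-kubeconfig secret exists in the agent namespace (CreateKlusterlet sets both up), because CreateFakeExternalKubeconfigSecret rebuilds the external kubeconfig from that secret with a fake server URL.

package e2e

import (
	"context"

	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"open-cluster-management.io/ocm/test/framework"
)

// simulateManagedClusterOutage swaps the external-managed-kubeconfig of a
// Hosted-mode klusterlet for one pointing at an unreachable API server, so a
// test can then assert the expected degraded conditions via
// spoke.CheckKlusterletStatus.
func simulateManagedClusterOutage(spoke *framework.Spoke, klusterletName string) {
	klusterlet, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(
		context.TODO(), klusterletName, metav1.GetOptions{})
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	// Drop the real external-managed-kubeconfig secret from the agent namespace...
	err = spoke.DeleteExternalKubeconfigSecret(klusterlet)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())

	// ...and recreate it from the bootstrap kubeconfig with the server URL
	// rewritten to a fake host, so the agents lose access to the managed cluster.
	err = spoke.CreateFakeExternalKubeconfigSecret(klusterlet)
	gomega.Expect(err).ToNot(gomega.HaveOccurred())
}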