Refactor tester to e2e framework.
Signed-off-by: xuezhaojun <zxue@redhat.com>
xuezhaojun committed Jul 12, 2024
1 parent 9a7efae commit c7a262c
Showing 27 changed files with 1,758 additions and 1,643 deletions.
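For orientation, the refactor replaces the single t tester handle with separate hub-side and spoke-side fixtures from the new test/framework package. The sketch below illustrates the resulting call shape; the hubFixture and spokeFixture stand-in types are assumptions made only for this illustration, while the method and field names (CreateManagedClusterAddOn, KubeClient) mirror call sites visible in the diff below.

// Sketch only: hubFixture and spokeFixture are stand-ins written for this
// illustration; the real fixtures live in open-cluster-management.io/ocm/test/framework.
package sketch

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// hubFixture groups hub-side helpers used by the e2e tests.
type hubFixture interface {
	CreateManagedClusterAddOn(clusterNamespace, addOnName, installNamespace string) error
}

// spokeFixture groups spoke-side clients used by the e2e tests.
type spokeFixture struct {
	KubeClient kubernetes.Interface
}

// createAddOnWithNamespace shows the post-refactor split: a hub helper creates the
// hub-side addon resource, and the spoke client creates its installation namespace.
func createAddOnWithNamespace(ctx context.Context, hub hubFixture, spoke spokeFixture, clusterName, addOnName string) error {
	if err := hub.CreateManagedClusterAddOn(clusterName, addOnName, addOnName); err != nil {
		return err
	}
	_, err := spoke.KubeClient.CoreV1().Namespaces().Create(ctx, &corev1.Namespace{
		ObjectMeta: metav1.ObjectMeta{Name: addOnName},
	}, metav1.CreateOptions{})
	return err
}
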
69 changes: 35 additions & 34 deletions test/e2e/addon_lease_test.go
@@ -17,6 +17,8 @@ import (

clusterv1 "open-cluster-management.io/api/cluster/v1"
operatorapiv1 "open-cluster-management.io/api/operator/v1"

"open-cluster-management.io/ocm/test/framework"
)

const availableLabelValue = "available"
@@ -28,12 +30,12 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
// create an addon on created managed cluster
addOnName = fmt.Sprintf("addon-%s", rand.String(6))
ginkgo.By(fmt.Sprintf("Creating managed cluster addon %q", addOnName))
-err := t.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName)
+err := hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName)
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// create addon installation namespace
ginkgo.By(fmt.Sprintf("Creating managed cluster addon installation namespace %q", addOnName))
-_, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
+_, err = spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: addOnName,
},
@@ -43,13 +45,13 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName))
-err := t.SpokeKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
+err := spoke.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

ginkgo.It("Should keep addon status to available", func() {
ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName))
-_, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
+_, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: addOnName,
Namespace: addOnName,
@@ -61,7 +63,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -73,7 +75,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {
-cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
+cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
@@ -87,7 +89,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

ginkgo.It("Should update addon status to unavailable if addon stops to update its lease", func() {
ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName))
-_, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
+_, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: addOnName,
Namespace: addOnName,
@@ -99,7 +101,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -111,7 +113,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {
-cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
+cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
@@ -123,14 +125,14 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
}).Should(gomega.BeTrue())

ginkgo.By(fmt.Sprintf("Updating lease %q with a past time", addOnName))
-lease, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+lease, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Get(context.TODO(), addOnName, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
lease.Spec.RenewTime = &metav1.MicroTime{Time: time.Now().Add(-10 * time.Minute)}
-_, err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Update(context.TODO(), lease, metav1.UpdateOptions{})
+_, err = spoke.KubeClient.CoordinationV1().Leases(addOnName).Update(context.TODO(), lease, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -142,7 +144,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {
-cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
+cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
@@ -156,7 +158,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

ginkgo.It("Should update addon status to unknown if there is no lease for this addon", func() {
ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName))
-_, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
+_, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: addOnName,
Namespace: addOnName,
@@ -168,7 +170,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -180,7 +182,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {
-cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
+cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
@@ -192,11 +194,11 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
}).Should(gomega.BeTrue())

ginkgo.By(fmt.Sprintf("Deleting lease %q", addOnName))
-err = t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
+err = spoke.KubeClient.CoordinationV1().Leases(addOnName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
@@ -208,7 +210,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// check if the cluster has a label for addon with expected value
gomega.Eventually(func() bool {
-cluster, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
+cluster, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), universalClusterName, metav1.GetOptions{})
if err != nil {
return false
}
@@ -227,18 +229,17 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
klusterletName = fmt.Sprintf("e2e-klusterlet-%s", rand.String(6))
clusterName = fmt.Sprintf("e2e-managedcluster-%s", rand.String(6))
agentNamespace := fmt.Sprintf("open-cluster-management-agent-%s", rand.String(6))
-_, err := t.CreateApprovedKlusterlet(
-klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode))
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+framework.CreateAndApproveKlusterlet(
+hub, spoke,
+klusterletName, clusterName, agentNamespace, operatorapiv1.InstallMode(klusterletDeployMode), bootstrapHubKubeConfigSecret, images)
// create an addon on created managed cluster
addOnName = fmt.Sprintf("addon-%s", rand.String(6))
ginkgo.By(fmt.Sprintf("Creating managed cluster addon %q", addOnName))
-err = t.CreateManagedClusterAddOn(clusterName, addOnName, addOnName)
-gomega.Expect(err).ToNot(gomega.HaveOccurred())
+gomega.Expect(hub.CreateManagedClusterAddOn(clusterName, addOnName, addOnName)).ToNot(gomega.HaveOccurred())

// create addon installation namespace
ginkgo.By(fmt.Sprintf("Creating managed cluster addon installation namespace %q", addOnName))
-_, err = t.SpokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
+_, err := spoke.KubeClient.CoreV1().Namespaces().Create(context.TODO(), &corev1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: addOnName,
},
@@ -248,15 +249,15 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

ginkgo.AfterEach(func() {
ginkgo.By(fmt.Sprintf("Cleaning managed cluster addon installation namespace %q", addOnName))
-err := t.HubKubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
+err := hub.KubeClient.CoreV1().Namespaces().Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
ginkgo.By(fmt.Sprintf("clean klusterlet %v resources after the test case", klusterletName))
-gomega.Expect(t.cleanKlusterletResources(klusterletName, clusterName)).To(gomega.BeNil())
+framework.CleanKlusterletRelatedResources(hub, spoke, klusterletName, clusterName)
})

ginkgo.It("Should update addon status to unknown if managed cluster stops to update its lease", func() {
ginkgo.By(fmt.Sprintf("Creating lease %q for managed cluster addon %q", addOnName, addOnName))
-_, err := t.SpokeKubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
+_, err := spoke.KubeClient.CoordinationV1().Leases(addOnName).Create(context.TODO(), &coordv1.Lease{
ObjectMeta: metav1.ObjectMeta{
Name: addOnName,
Namespace: addOnName,
Expand All @@ -268,7 +269,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
Expand All @@ -284,11 +285,11 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// delete registration agent to stop agent update its status
ginkgo.By("Stoping klusterlet")
-err = t.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{})
+err = spoke.OperatorClient.OperatorV1().Klusterlets().Delete(context.TODO(), klusterletName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-_, err := t.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{})
+_, err := spoke.OperatorClient.OperatorV1().Klusterlets().Get(context.TODO(), klusterletName, metav1.GetOptions{})
if errors.IsNotFound(err) {
klog.Infof("klusterlet %s deleted successfully", klusterletName)
return nil
Expand All @@ -302,7 +303,7 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(

// for speeding up test, update managed cluster status to unknown manually
ginkgo.By(fmt.Sprintf("Updating managed cluster %s status to unknown", clusterName))
-found, err := t.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
+found, err := hub.ClusterClient.ClusterV1().ManagedClusters().Get(context.TODO(), clusterName, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
found.Status = clusterv1.ManagedClusterStatus{
Conditions: []metav1.Condition{
Expand All @@ -315,11 +316,11 @@ var _ = ginkgo.Describe("Addon Health Check", ginkgo.Label("addon-lease"), func(
},
},
}
-_, err = t.ClusterClient.ClusterV1().ManagedClusters().UpdateStatus(context.TODO(), found, metav1.UpdateOptions{})
+_, err = hub.ClusterClient.ClusterV1().ManagedClusters().UpdateStatus(context.TODO(), found, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
-found, err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
+found, err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName).Get(context.TODO(), addOnName, metav1.GetOptions{})
if err != nil {
return err
}
14 changes: 7 additions & 7 deletions test/e2e/addon_test.go
@@ -17,37 +17,37 @@ var _ = Describe("Manage the managed cluster addons", Label("addon"), func() {
})

AfterEach(func() {
-err := t.AddOnClinet.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
+err := hub.AddonClient.AddonV1alpha1().ManagedClusterAddOns(universalClusterName).Delete(context.TODO(), addOnName, metav1.DeleteOptions{})
Expect(err).ToNot(HaveOccurred())
})

It("Create one managed cluster addon and make sure it is available", func() {
By(fmt.Sprintf("create the addon %v on the managed cluster namespace %v", addOnName, universalClusterName))
-err := t.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName)
+err := hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName)
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("create the addon lease %v on addon install namespace %v", addOnName, addOnName))
-err = t.CreateManagedClusterAddOnLease(addOnName, addOnName)
+err = hub.CreateManagedClusterAddOnLease(addOnName, addOnName)
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("wait the addon %v available condition to be true", addOnName))
Eventually(func() error {
-return t.CheckManagedClusterAddOnStatus(universalClusterName, addOnName)
+return hub.CheckManagedClusterAddOnStatus(universalClusterName, addOnName)
}).Should(Succeed())
})

It("Create one managed cluster addon and make sure it is available in Hosted mode", func() {
By(fmt.Sprintf("create the addon %v on the managed cluster namespace %v", addOnName, universalClusterName))
-err := t.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName)
+err := hub.CreateManagedClusterAddOn(universalClusterName, addOnName, addOnName)
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("create the addon lease %v on addon install namespace %v", addOnName, addOnName))
-err = t.CreateManagedClusterAddOnLease(addOnName, addOnName)
+err = hub.CreateManagedClusterAddOnLease(addOnName, addOnName)
Expect(err).ToNot(HaveOccurred())

By(fmt.Sprintf("wait the addon %v available condition to be true", addOnName))
Eventually(func() error {
-return t.CheckManagedClusterAddOnStatus(universalClusterName, addOnName)
+return hub.CheckManagedClusterAddOnStatus(universalClusterName, addOnName)
}).Should(Succeed())
})
})