diff --git a/test/e2e/aggregatedapi_test.go b/test/e2e/aggregatedapi_test.go index 70501324bf50..3780bbff50d8 100644 --- a/test/e2e/aggregatedapi_test.go +++ b/test/e2e/aggregatedapi_test.go @@ -166,7 +166,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) }) - ginkgo.When("Serviceaccount(tom) access the member1 cluster", func() { + ginkgo.When(fmt.Sprintf("Serviceaccount(tom) access the %s cluster", member1), func() { var clusterClient kubernetes.Interface ginkgo.BeforeEach(func() { @@ -175,10 +175,10 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f }) ginkgo.BeforeEach(func() { - klog.Infof("Create ServiceAccount(%s) in the cluster(%s)", klog.KObj(tomServiceAccount).String(), member1) + klog.Infof("Create ServiceAccount(%s) in the %s cluster", klog.KObj(tomServiceAccount).String(), member1) framework.CreateServiceAccount(clusterClient, tomServiceAccount) ginkgo.DeferCleanup(func() { - klog.Infof("Delete ServiceAccount(%s) in the cluster(%s)", klog.KObj(tomServiceAccount).String(), member1) + klog.Infof("Delete ServiceAccount(%s) in the %s cluster", klog.KObj(tomServiceAccount).String(), member1) framework.RemoveServiceAccount(clusterClient, tomServiceAccount.Namespace, tomServiceAccount.Name) }) }) @@ -191,8 +191,8 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f framework.RemoveClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember.Name) }) - ginkgo.It("tom access the member cluster", func() { - ginkgo.By("access the cluster `/api` path with right", func() { + ginkgo.It("tom access the member1 cluster api with and without right", func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api` path with right", member1), func() { gomega.Eventually(func(g gomega.Gomega) (int, error) { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api", member1), tomToken) 
g.Expect(err).ShouldNot(gomega.HaveOccurred()) @@ -200,18 +200,18 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f }, pollTimeout, pollInterval).Should(gomega.Equal(http.StatusOK)) }) - ginkgo.By("access the cluster /api/v1/nodes path without right", func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path without right", member1), func() { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", member1), tomToken) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden)) }) - ginkgo.By("create rbac in the member1 cluster", func() { + ginkgo.By(fmt.Sprintf("create rbac in the %s cluster", member1), func() { framework.CreateClusterRole(clusterClient, tomClusterRoleOnMember) framework.CreateClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember) }) - ginkgo.By("access the member1 /api/v1/nodes path with right", func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path with right", member1), func() { gomega.Eventually(func(g gomega.Gomega) (int, error) { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", member1), tomToken) g.Expect(err).ShouldNot(gomega.HaveOccurred()) @@ -221,9 +221,9 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f }) }) - ginkgo.When("Serviceaccount(tom) access the member2 cluster", func() { - ginkgo.It("tom access the member cluster without right", func() { - ginkgo.By("access the cluster `/api` path without right", func() { + ginkgo.When(fmt.Sprintf("Serviceaccount(tom) access the %s cluster", member2), func() { + ginkgo.It("tom access the member2 cluster without right", func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api` path without right", member2), func() { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy, member2), tomToken) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) 
gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden)) @@ -257,8 +257,8 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f framework.RemoveClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember.Name) }) - ginkgo.It("tom access the member cluster", func() { - ginkgo.By("access the cluster `/api` path with right", func() { + ginkgo.It("tom access the specified cluster with/without right", func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api` path with right", clusterName), func() { gomega.Eventually(func(g gomega.Gomega) (int, error) { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api", clusterName), tomToken) g.Expect(err).ShouldNot(gomega.HaveOccurred()) @@ -266,7 +266,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f }, pollTimeout, pollInterval).Should(gomega.Equal(http.StatusOK)) }) - ginkgo.By("access the cluster /api/v1/nodes path without right", func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path without right", clusterName), func() { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", clusterName), tomToken) gomega.Expect(err).ShouldNot(gomega.HaveOccurred()) gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden)) @@ -277,7 +277,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f framework.CreateClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember) }) - ginkgo.By(fmt.Sprintf("access the %s /api/v1/nodes path with right", clusterName), func() { + ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path with right", clusterName), func() { gomega.Eventually(func(g gomega.Gomega) (int, error) { code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", clusterName), tomToken) g.Expect(err).ShouldNot(gomega.HaveOccurred()) diff --git a/test/e2e/coverage_docs/E2E_coverage.md 
b/test/e2e/coverage_docs/E2E_coverage.md index a7fdabe250f7..27332080fe4b 100644 --- a/test/e2e/coverage_docs/E2E_coverage.md +++ b/test/e2e/coverage_docs/E2E_coverage.md @@ -1,4 +1,3 @@ ### E2E coverage of features Features: https://karmada.io/docs/key-features/features/ - diff --git a/test/e2e/coverage_docs/aggregatedapi_test.md b/test/e2e/coverage_docs/aggregatedapi_test.md new file mode 100644 index 000000000000..c7bde55fcbbf --- /dev/null +++ b/test/e2e/coverage_docs/aggregatedapi_test.md @@ -0,0 +1,8 @@ +### aggregatedapi e2e test coverage analysis + +#### Serviceaccount Access to (member1/member2/specified) cluster api with or without Permissions +| Test Case | E2E Describe Text | Comments | +|-----------------------------------------------------------------------------------------------|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------| +| Serviceaccount Access to the member1 cluster's `/api` and `/api/v1/nodes` with/without right | tom access the member1 cluster api with and without right | [Aggregated Kubernetes API Endpoint](https://karmada.io/zh/docs/userguide/globalview/aggregated-api-endpoint/) | +| Serviceaccount Access to the member2 cluster's `/api` without right | tom access the member2 cluster without right | | +| Serviceaccount Access to the specified cluster's `/api` and `/api/v1/nodes` with/without right | tom access the specified cluster with/without right | | diff --git a/test/e2e/coverage_docs/cronfederatedhpa_test.md b/test/e2e/coverage_docs/cronfederatedhpa_test.md new file mode 100644 index 000000000000..f40359143671 --- /dev/null +++ b/test/e2e/coverage_docs/cronfederatedhpa_test.md @@ -0,0 +1,8 @@ +### cron federated hpa e2e test coverage analysis + +| Test Case | E2E Describe Text | Comments |
+|-----------------------------------------------------------------------------------------------------------|---------------------------------------------------------|--------------------------------------------------------------------------------| +| Test the scaling of FederatedHPA by creating a CronFederatedHPA rule | Test scale FederatedHPA | [FederatedHPA](https://karmada.io/zh/docs/userguide/autoscaling/federatedhpa/) | +| Test the scaling of a Deployment by creating a CronFederatedHPA rule | Test scale Deployment | | +| Test the suspend rule in CronFederatedHPA by creating a rule that is supposed to be suspended | Test suspend rule in CronFederatedHPA | | +| Test the unsuspend rule, then suspend it in CronFederatedHPA by manipulating the rule's suspension status | Test unsuspend rule then suspend it in CronFederatedHPA | | diff --git a/test/e2e/coverage_docs/namespace_test.md b/test/e2e/coverage_docs/namespace_test.md index 5e945a234c17..0dc56e97b0e9 100644 --- a/test/e2e/coverage_docs/namespace_test.md +++ b/test/e2e/coverage_docs/namespace_test.md @@ -1,7 +1,7 @@ ### namespace e2e test coverage analysis -| Test Case | E2E Describe Text | Comments | -|------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------| -| Create a namespace that needs to be automatically synchronized across all member clusters. | create a namespace in karmada-apiserver || -| Delete a namespace, and all member clusters need to automatically synchronize the deletion. | delete a namespace from karmada-apiserver || -| When a new cluster joins, the namespaces on the Karmada control plane (excluding reserved namespaces) need to be synchronized to that cluster. 
| joining new cluster | [Namespace Management](https://karmada.io/docs/next/userguide/bestpractices/namespace-management/#default-namespace-propagation) | +| Test Case | E2E Describe Text | Comments | +|-----------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------| +| Create a namespace that needs to be automatically synchronized across all member clusters | create a namespace in karmada-apiserver || +| Delete a namespace, and all member clusters need to automatically synchronize the deletion | delete a namespace from karmada-apiserver || +| When a new cluster joins, the namespaces on the Karmada control plane (excluding reserved namespaces) need to be synchronized to that cluster | joining new cluster | [Namespace Management](https://karmada.io/docs/next/userguide/bestpractices/namespace-management/#default-namespace-propagation) | diff --git a/test/e2e/coverage_docs/resource_test.md b/test/e2e/coverage_docs/resource_test.md index 568a80453c57..5c69ac8d2323 100644 --- a/test/e2e/coverage_docs/resource_test.md +++ b/test/e2e/coverage_docs/resource_test.md @@ -1,13 +1,13 @@ ### resource e2e test coverage analysis -| Test Case | E2E Describe Text | Comments | -|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------|----------| -| DeploymentStatus collection testing: Test the status collection of Deployment resources. | DeploymentStatus collection | | -| ServiceStatus collection testing: Test the status collection of Service resources. | ServiceStatus collection | | -| NodePort Service collection testing: Test the status collection of NodePort type Service resources. 
| NodePort Service collection | | -| IngressStatus collection testing: Test the status collection of Ingress resources. | IngressStatus collection | | -| JobStatus collection testing: Test the status collection of Job resources. | JobStatus collection | | -| DaemonSetStatus collection testing: Test the status collection of DaemonSet resources. | DaemonSetStatus collection | | -| StatefulSetStatus collection testing: Test the status collection of StatefulSet resources. | StatefulSetStatus collection | | -| PodDisruptionBudget collection testing: Test the status collection of PodDisruptionBudget resources. | PodDisruptionBudget collection | | -| Workload status synchronization testing: Test the synchronization of workload status when a cluster fails and recovers quickly. | Workload status synchronization | | +| Test Case | E2E Describe Text | Comments | +|--------------------------------------------------------------------------------------------------------------------------------|---------------------------------|----------| +| DeploymentStatus collection testing: Test the status collection of Deployment resources | DeploymentStatus collection | | +| ServiceStatus collection testing: Test the status collection of Service resources | ServiceStatus collection | | +| NodePort Service collection testing: Test the status collection of NodePort type Service resources | NodePort Service collection | | +| IngressStatus collection testing: Test the status collection of Ingress resources | IngressStatus collection | | +| JobStatus collection testing: Test the status collection of Job resources | JobStatus collection | | +| DaemonSetStatus collection testing: Test the status collection of DaemonSet resources | DaemonSetStatus collection | | +| StatefulSetStatus collection testing: Test the status collection of StatefulSet resources | StatefulSetStatus collection | | +| PodDisruptionBudget collection testing: Test the status collection of PodDisruptionBudget resources | 
PodDisruptionBudget collection | | +| Workload status synchronization testing: Test the synchronization of workload status when a cluster fails and recovers quickly | Workload status synchronization | | diff --git a/test/e2e/coverage_docs/seamless_migration_test.md b/test/e2e/coverage_docs/seamless_migration_test.md new file mode 100644 index 000000000000..333e6e5e64bd --- /dev/null +++ b/test/e2e/coverage_docs/seamless_migration_test.md @@ -0,0 +1,7 @@ +### seamless migration e2e test coverage analysis + +| Test Case | E2E Describe Text | Comments | +|-----------------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------| +| Verify migrate Deployment | Test migrate namespaced resource: Deployment | [Migrate](https://karmada.io/zh/docs/administrator/migration/promote-legacy-workload) | +| Verify migrate ClusterRole | Test migrate cluster resources: ClusterRole | | +| Verify migrate Service (NodePort) | Test migrate namespaced resource: Service (NodePort) | | diff --git a/test/e2e/coverage_docs/spread_by_region_test.md b/test/e2e/coverage_docs/spread_by_region_test.md new file mode 100644 index 000000000000..c69901e8780c --- /dev/null +++ b/test/e2e/coverage_docs/spread_by_region_test.md @@ -0,0 +1,6 @@ +### spread by region e2e test coverage analysis + +| Test Case | E2E Describe Text | Comments | +|---------------------------------------------------------------|-----------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------| +| Verifying the deployment spread across multiple regions | multiple region deployment testing(check whether deployment is scheduled to multiple regions) | [Multi region HA support](https://karmada.io/zh/docs/userguide/scheduling/resource-propagating/#multi-region-ha-support) | +| 
Updating the propagation policy and verifying its constraints | multiple region deployment testing(update propagation policy to propagate to one region) | | diff --git a/test/e2e/cronfederatedhpa_test.go b/test/e2e/cronfederatedhpa_test.go index 3af2d3f78d28..6819ea3ea079 100644 --- a/test/e2e/cronfederatedhpa_test.go +++ b/test/e2e/cronfederatedhpa_test.go @@ -99,7 +99,7 @@ var _ = ginkgo.Describe("[CronFederatedHPA] CronFederatedHPA testing", func() { framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName) }) - ginkgo.It("Scale FederatedHPA testing", func() { + ginkgo.It("Test scale FederatedHPA", func() { framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*fhpa.Spec.MinReplicas)) // Create CronFederatedHPA to scale FederatedHPA @@ -111,7 +111,7 @@ var _ = ginkgo.Describe("[CronFederatedHPA] CronFederatedHPA testing", func() { }) // case 2. Scale deployment. - ginkgo.Context("Scale Deployment", func() { + ginkgo.Context("Test scale Deployment", func() { targetReplicas := pointer.Int32(4) ginkgo.BeforeEach(func() { diff --git a/test/e2e/spread_by_region_test.go b/test/e2e/spread_by_region_test.go index 1c82bf104bcf..48b9093ee9f4 100644 --- a/test/e2e/spread_by_region_test.go +++ b/test/e2e/spread_by_region_test.go @@ -132,10 +132,7 @@ var _ = framework.SerialDescribe("spread-by-region testing", func() { targetClusterNames = append(targetClusterNames, cluster.Name) } - if len(targetClusterNames) != updatedRegionGroups { - return false, nil - } - return true, nil + return len(targetClusterNames) == updatedRegionGroups, nil }, pollTimeout, pollInterval).Should(gomega.Equal(true)) }) })