Skip to content

Commit

Permalink
Merge pull request #3928 from Vacant2333/add_e2e_coverage_analysis
Browse files Browse the repository at this point in the history
Add e2e coverage analysis
  • Loading branch information
karmada-bot committed Aug 11, 2023
2 parents ddae25c + c1d82ba commit 6dea121
Show file tree
Hide file tree
Showing 10 changed files with 63 additions and 38 deletions.
30 changes: 15 additions & 15 deletions test/e2e/aggregatedapi_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -166,7 +166,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
})

ginkgo.When("Serviceaccount(tom) access the member1 cluster", func() {
ginkgo.When(fmt.Sprintf("Serviceaccount(tom) access the %s cluster", member1), func() {
var clusterClient kubernetes.Interface

ginkgo.BeforeEach(func() {
Expand All @@ -175,10 +175,10 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
})

ginkgo.BeforeEach(func() {
klog.Infof("Create ServiceAccount(%s) in the cluster(%s)", klog.KObj(tomServiceAccount).String(), member1)
klog.Infof("Create ServiceAccount(%s) in the %s cluster", klog.KObj(tomServiceAccount).String(), member1)
framework.CreateServiceAccount(clusterClient, tomServiceAccount)
ginkgo.DeferCleanup(func() {
klog.Infof("Delete ServiceAccount(%s) in the cluster(%s)", klog.KObj(tomServiceAccount).String(), member1)
klog.Infof("Delete ServiceAccount(%s) in the %s cluster", klog.KObj(tomServiceAccount).String(), member1)
framework.RemoveServiceAccount(clusterClient, tomServiceAccount.Namespace, tomServiceAccount.Name)
})
})
Expand All @@ -191,27 +191,27 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
framework.RemoveClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember.Name)
})

ginkgo.It("tom access the member cluster", func() {
ginkgo.By("access the cluster `/api` path with right", func() {
ginkgo.It("tom access the member1 cluster api with and without right", func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api` path with right", member1), func() {
gomega.Eventually(func(g gomega.Gomega) (int, error) {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api", member1), tomToken)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
return code, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(http.StatusOK))
})

ginkgo.By("access the cluster /api/v1/nodes path without right", func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path without right", member1), func() {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", member1), tomToken)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden))
})

ginkgo.By("create rbac in the member1 cluster", func() {
ginkgo.By(fmt.Sprintf("create rbac in the %s cluster", member1), func() {
framework.CreateClusterRole(clusterClient, tomClusterRoleOnMember)
framework.CreateClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember)
})

ginkgo.By("access the member1 /api/v1/nodes path with right", func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path with right", member1), func() {
gomega.Eventually(func(g gomega.Gomega) (int, error) {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", member1), tomToken)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
Expand All @@ -221,9 +221,9 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
})
})

ginkgo.When("Serviceaccount(tom) access the member2 cluster", func() {
ginkgo.It("tom access the member cluster without right", func() {
ginkgo.By("access the cluster `/api` path without right", func() {
ginkgo.When(fmt.Sprintf("Serviceaccount(tom) access the %s cluster", member2), func() {
ginkgo.It("tom access the member2 cluster without right", func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api` path without right", member2), func() {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy, member2), tomToken)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden))
Expand Down Expand Up @@ -257,16 +257,16 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
framework.RemoveClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember.Name)
})

ginkgo.It("tom access the member cluster", func() {
ginkgo.By("access the cluster `/api` path with right", func() {
ginkgo.It("tom access the specified cluster with/without right", func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api` path with right", clusterName), func() {
gomega.Eventually(func(g gomega.Gomega) (int, error) {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api", clusterName), tomToken)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
return code, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(http.StatusOK))
})

ginkgo.By("access the cluster /api/v1/nodes path without right", func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path without right", clusterName), func() {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", clusterName), tomToken)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
gomega.Expect(code).Should(gomega.Equal(http.StatusForbidden))
Expand All @@ -277,7 +277,7 @@ var _ = framework.SerialDescribe("Aggregated Kubernetes API Endpoint testing", f
framework.CreateClusterRoleBinding(clusterClient, tomClusterRoleBindingOnMember)
})

ginkgo.By(fmt.Sprintf("access the %s /api/v1/nodes path with right", clusterName), func() {
ginkgo.By(fmt.Sprintf("access the %s cluster `/api/v1/nodes` path with right", clusterName), func() {
gomega.Eventually(func(g gomega.Gomega) (int, error) {
code, err := helper.DoRequest(fmt.Sprintf(karmadaHost+clusterProxy+"api/v1/nodes", clusterName), tomToken)
g.Expect(err).ShouldNot(gomega.HaveOccurred())
Expand Down
1 change: 0 additions & 1 deletion test/e2e/coverage_docs/E2E_coverage.md
Original file line number Diff line number Diff line change
@@ -1,4 +1,3 @@
### E2E coverage of features

Features: https://karmada.io/docs/key-features/features/

8 changes: 8 additions & 0 deletions test/e2e/coverage_docs/aggregatedapi_test.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
### aggregatedapi e2e test coverage analysis

#### ServiceAccount access to the (member1/member2/specified) cluster API with or without permissions
| Test Case | E2E Describe Text | Comments |
|-----------------------------------------------------------------------------------------------|-----------------------------------------------------------|----------------------------------------------------------------------------------------------------------------|
| ServiceAccount access to the member1 cluster's `/api` and `/api/v1/nodes` with/without right   | tom access the member1 cluster api with and without right | [Aggregated Kubernetes API Endpoint](https://karmada.io/zh/docs/userguide/globalview/aggregated-api-endpoint/) |
| ServiceAccount access to the member2 cluster's `/api` without right                            | tom access the member2 cluster without right              |                                                                                                                |
| ServiceAccount access to the specified cluster's `/api` and `/api/v1/nodes` with/without right | tom access the specified cluster with/without right       |                                                                                                                |
8 changes: 8 additions & 0 deletions test/e2e/coverage_docs/cronfederatedhpa_test.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
### cron federated hpa e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|-----------------------------------------------------------------------------------------------------------|---------------------------------------------------------|--------------------------------------------------------------------------------|
| Test the scaling of FederatedHPA by creating a CronFederatedHPA rule | Test scale FederatedHPA | [FederatedHPA](https://karmada.io/zh/docs/userguide/autoscaling/federatedhpa/) |
| Test the scaling of a Deployment by creating a CronFederatedHPA rule | Test scale Deployment | |
| Test the suspend rule in CronFederatedHPA by creating a rule that is supposed to be suspended | Test suspend rule in CronFederatedHPA | |
| Test the unsuspend rule, then suspend it in CronFederatedHPA by manipulating the rule's suspension status | Test unsuspend rule then suspend it in CronFederatedHPA | |
10 changes: 5 additions & 5 deletions test/e2e/coverage_docs/namespace_test.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
### namespace e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|
| Create a namespace that needs to be automatically synchronized across all member clusters. | create a namespace in karmada-apiserver ||
| Delete a namespace, and all member clusters need to automatically synchronize the deletion. | delete a namespace from karmada-apiserver ||
| When a new cluster joins, the namespaces on the Karmada control plane (excluding reserved namespaces) need to be synchronized to that cluster. | joining new cluster | [Namespace Management](https://karmada.io/docs/next/userguide/bestpractices/namespace-management/#default-namespace-propagation) |
| Test Case | E2E Describe Text | Comments |
|-----------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------|
| Create a namespace that needs to be automatically synchronized across all member clusters | create a namespace in karmada-apiserver ||
| Delete a namespace, and all member clusters need to automatically synchronize the deletion | delete a namespace from karmada-apiserver ||
| When a new cluster joins, the namespaces on the Karmada control plane (excluding reserved namespaces) need to be synchronized to that cluster | joining new cluster | [Namespace Management](https://karmada.io/docs/next/userguide/bestpractices/namespace-management/#default-namespace-propagation) |
22 changes: 11 additions & 11 deletions test/e2e/coverage_docs/resource_test.md
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
### resource e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|---------------------------------------------------------------------------------------------------------------------------------|---------------------------------|----------|
| DeploymentStatus collection testing: Test the status collection of Deployment resources. | DeploymentStatus collection | |
| ServiceStatus collection testing: Test the status collection of Service resources. | ServiceStatus collection | |
| NodePort Service collection testing: Test the status collection of NodePort type Service resources. | NodePort Service collection | |
| IngressStatus collection testing: Test the status collection of Ingress resources. | IngressStatus collection | |
| JobStatus collection testing: Test the status collection of Job resources. | JobStatus collection | |
| DaemonSetStatus collection testing: Test the status collection of DaemonSet resources. | DaemonSetStatus collection | |
| StatefulSetStatus collection testing: Test the status collection of StatefulSet resources. | StatefulSetStatus collection | |
| PodDisruptionBudget collection testing: Test the status collection of PodDisruptionBudget resources. | PodDisruptionBudget collection | |
| Workload status synchronization testing: Test the synchronization of workload status when a cluster fails and recovers quickly. | Workload status synchronization | |
| Test Case | E2E Describe Text | Comments |
|--------------------------------------------------------------------------------------------------------------------------------|---------------------------------|----------|
| DeploymentStatus collection testing: Test the status collection of Deployment resources | DeploymentStatus collection | |
| ServiceStatus collection testing: Test the status collection of Service resources | ServiceStatus collection | |
| NodePort Service collection testing: Test the status collection of NodePort type Service resources | NodePort Service collection | |
| IngressStatus collection testing: Test the status collection of Ingress resources | IngressStatus collection | |
| JobStatus collection testing: Test the status collection of Job resources | JobStatus collection | |
| DaemonSetStatus collection testing: Test the status collection of DaemonSet resources | DaemonSetStatus collection | |
| StatefulSetStatus collection testing: Test the status collection of StatefulSet resources | StatefulSetStatus collection | |
| PodDisruptionBudget collection testing: Test the status collection of PodDisruptionBudget resources | PodDisruptionBudget collection | |
| Workload status synchronization testing: Test the synchronization of workload status when a cluster fails and recovers quickly | Workload status synchronization | |
7 changes: 7 additions & 0 deletions test/e2e/coverage_docs/seamless_migration_test.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
### seamless migration e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|-----------------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------|
| Verify migrate Deployment | Test migrate namespaced resource: Deployment | [Migrate](https://karmada.io/zh/docs/administrator/migration/promote-legacy-workload) |
| Verify migrate ClusterRole | Test migrate cluster resources: ClusterRole | |
| Verify migrate Service (NodePort) | Test migrate namespaced resource: Service (NodePort) | |
6 changes: 6 additions & 0 deletions test/e2e/coverage_docs/spread_by_region_test.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
### spread by region e2e test coverage analysis

| Test Case | E2E Describe Text | Comments |
|---------------------------------------------------------------|-----------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|
| Verifying the deployment spread across multiple regions | multiple region deployment testing(check whether deployment is scheduled to multiple regions) | [Multi region HA support](https://karmada.io/zh/docs/userguide/scheduling/resource-propagating/#multi-region-ha-support) |
| Updating the propagation policy and verifying its constraints | multiple region deployment testing(update propagation policy to propagate to one region) | |
4 changes: 2 additions & 2 deletions test/e2e/cronfederatedhpa_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -99,7 +99,7 @@ var _ = ginkgo.Describe("[CronFederatedHPA] CronFederatedHPA testing", func() {
framework.RemoveCronFederatedHPA(karmadaClient, testNamespace, cronFHPAName)
})

ginkgo.It("Scale FederatedHPA testing", func() {
ginkgo.It("Test scale FederatedHPA testing", func() {
framework.WaitDeploymentReplicasFitWith(framework.ClusterNames(), testNamespace, deploymentName, int(*fhpa.Spec.MinReplicas))

// Create CronFederatedHPA to scale FederatedHPA
Expand All @@ -111,7 +111,7 @@ var _ = ginkgo.Describe("[CronFederatedHPA] CronFederatedHPA testing", func() {
})

// case 2. Scale deployment.
ginkgo.Context("Scale Deployment", func() {
ginkgo.Context("Test scale Deployment", func() {
targetReplicas := pointer.Int32(4)

ginkgo.BeforeEach(func() {
Expand Down
5 changes: 1 addition & 4 deletions test/e2e/spread_by_region_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,10 +132,7 @@ var _ = framework.SerialDescribe("spread-by-region testing", func() {
targetClusterNames = append(targetClusterNames, cluster.Name)
}

if len(targetClusterNames) != updatedRegionGroups {
return false, nil
}
return true, nil
return len(targetClusterNames) == updatedRegionGroups, nil
}, pollTimeout, pollInterval).Should(gomega.Equal(true))
})
})
Expand Down

0 comments on commit 6dea121

Please sign in to comment.