diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml
new file mode 100644
index 00000000..21d3bfa0
--- /dev/null
+++ b/.github/workflows/e2e.yaml
@@ -0,0 +1,85 @@
+name: E2E
+on:
+  pull_request:
+    branches:
+      - main
+      - release-*
+  push:
+    branches:
+      - main
+      - release-*
+
+env:
+  GO_VERSION: '1.19'
+  KIND_VERSION: 'v0.14.0'
+  KIND_IMAGE: 'kindest/node:v1.22.2'
+  KIND_CLUSTER_NAME: 'e2e-test'
+
+jobs:
+
+  CollaSet:
+    runs-on: ubuntu-20.04
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: true
+      - name: Setup Go
+        uses: actions/setup-go@v3
+        with:
+          go-version: ${{ env.GO_VERSION }}
+      - name: Cache Go Dependencies
+        uses: actions/cache@v2
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: ${{ runner.os }}-go-
+      - name: Setup Kind Cluster
+        uses: helm/kind-action@v1.10.0
+        with:
+          node_image: ${{ env.KIND_IMAGE }}
+          cluster_name: ${{ env.KIND_CLUSTER_NAME }}
+          config: ./test/e2e/scripts/kind-conf.yaml
+          version: ${{ env.KIND_VERSION }}
+      - name: Build Image
+        run: |
+          mkdir -p /tmp/kind
+          make kind-kube-config
+          make docker-build
+          make sync-kind-image
+      - name: Install Operating
+        run: |
+          set -ex
+          kubectl cluster-info
+          make deploy
+          for ((i=1;i<10;i++));
+          do
+            set +e
+            PODS=$(kubectl get pod -n kusionstack-system | grep -c '1/1')
+            set -e
+            if [ "$PODS" -eq 1 ]; then
+              break
+            fi
+            sleep 3
+          done
+          set -e
+          PODS=$(kubectl get pod -n kusionstack-system | grep -c '1/1')
+          if [ "$PODS" -eq 1 ]; then
+            echo "Kusionstack-manager is ready"
+          else
+            echo "Timed out waiting for Kusionstack-manager to become ready"
+            exit 1
+          fi
+      - name: Run e2e Tests
+        run: |
+          make ginkgo
+          set -e
+          KUBECONFIG=/tmp/kind/kubeconfig.yaml ./bin/ginkgo -timeout 10m -v --focus='\[apps\] CollaSet' test/e2e
+          restartCount=$(kubectl get pod -n kusionstack-system -l control-plane=controller-manager --no-headers | awk '{print $4}')
+          if [ "${restartCount}" -eq "0" ]; then
+            echo "Kusionstack-manager has not restarted"
+          else
+            kubectl get pod -n kusionstack-system -l control-plane=controller-manager --no-headers
+            echo "Kusionstack-manager has restarted, abort!!!"
+            kubectl get pod -n kusionstack-system --no-headers -l control-plane=controller-manager | awk '{print $1}' | xargs kubectl logs -p -n kusionstack-system
+            exit 1
+          fi
diff --git a/Makefile b/Makefile
index 7380b0df..3d04bc8d 100644
--- a/Makefile
+++ b/Makefile
@@ -62,7 +62,7 @@ vet: ## Run go vet against code.
 
 .PHONY: test
 test: manifests generate fmt vet envtest ## Run tests.
-	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./... -coverprofile cover.out
+	KUBEBUILDER_ASSETS="$(shell $(ENVTEST) use $(ENVTEST_K8S_VERSION) --bin-dir $(LOCALBIN) -p path)" go test ./pkg/... -coverprofile cover.out
 
 ##@ Build
 
@@ -150,10 +150,10 @@ clean-kind:
 
 .PHONY: e2e-local
 e2e-local:
-	KUBECONFIG=/tmp/kind-kubeconfig.yaml ./bin/ginkgo -timeout 10m -v test/e2e
+	KUBECONFIG=/tmp/kind/kubeconfig.yaml ./bin/ginkgo -timeout 10m -v test/e2e
 
 .PHONY: e2e-all
-e2e-all: e2e-local-deploy e2e-local clean-kind
+e2e-all: deploy-in-kind e2e-local clean-kind
 
 ##@ Build Dependencies
diff --git a/test/e2e/apps/collaset.go b/test/e2e/apps/collaset.go
new file mode 100644
index 00000000..743043fa
--- /dev/null
+++ b/test/e2e/apps/collaset.go
@@ -0,0 +1,572 @@
+/*
+Copyright 2024 The KusionStack Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apps
+
+import (
+	"time"
+
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/rand"
+	"k8s.io/apimachinery/pkg/util/sets"
+	clientset "k8s.io/client-go/kubernetes"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	appsv1alpha1 "kusionstack.io/operating/apis/apps/v1alpha1"
+	"kusionstack.io/operating/test/e2e/framework"
+)
+
+var _ = SIGDescribe("CollaSet", func() {
+
+	f := framework.NewDefaultFramework("collaset")
+	var client client.Client
+	var ns string
+	var clientSet clientset.Interface
+	var tester *framework.CollaSetTester
+	var randStr string
+
+	BeforeEach(func() {
+		clientSet = f.ClientSet
+		client = f.Client
+		ns = f.Namespace.Name
+		tester = framework.NewCollaSetTester(clientSet, client, ns)
+		randStr = rand.String(10)
+	})
+
+	framework.KusionstackDescribe("CollaSet Scaling", func() {
+
+		framework.ConformanceIt("scales in normal cases", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 3, appsv1alpha1.UpdateStrategy{})
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 3, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Scale in 1 pod")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Replicas = int32Pointer(2)
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 2, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+		})
+
+		framework.ConformanceIt("recreate pod with same revision", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 2, appsv1alpha1.UpdateStrategy{})
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 2, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Update CollaSet with 1 partition")
+			oldRevision := cls.Status.CurrentRevision
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Template.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxNew)
+				cls.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateCollaSetStrategy{
+					ByPartition: &appsv1alpha1.ByPartition{
+						Partition: int32Pointer(1),
+					},
+				}
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for update finished")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 1, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Delete the old pod")
+			var deletePod, updatedPod string
+			Eventually(func() bool {
+				pods, err := tester.ListPodsForCollaSet(cls)
+				Expect(err).NotTo(HaveOccurred())
+				for i := range pods {
+					pod := pods[i]
+					if pod.Labels[appsv1.ControllerRevisionHashLabelKey] == cls.Status.UpdatedRevision && updatedPod == "" {
+						updatedPod = pod.Name
+					} else if pod.Labels[appsv1.ControllerRevisionHashLabelKey] == cls.Status.CurrentRevision && deletePod == "" {
+						Expect(tester.UpdatePod(pod, func(pod *v1.Pod) {
+							pod.Labels[appsv1alpha1.PodDeletionIndicationLabelKey] = "true"
+						})).NotTo(HaveOccurred())
+						deletePod = pod.Name
+					}
+				}
+				return deletePod != "" && updatedPod != ""
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for pod deleted and recreated")
+			Eventually(func() bool {
+				pods, err := tester.ListPodsForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				for i := range pods {
+					if pods[i].Name == deletePod {
+						return false
+					}
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 1, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Check recreate pod revision")
+			Eventually(func() bool {
+				pods, err := tester.ListPodsForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				for i := range pods {
+					if pods[i].Name != updatedPod {
+						return pods[i].Labels[appsv1.ControllerRevisionHashLabelKey] == oldRevision
+					}
+				}
+				return false
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+		})
+
+		framework.ConformanceIt("Selective delete and scale in pods", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 3, appsv1alpha1.UpdateStrategy{})
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 3, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("selective delete pods")
+			pods, err := tester.ListPodsForCollaSet(cls)
+			Expect(err).NotTo(HaveOccurred())
+			podToDelete := pods[0]
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.ScaleStrategy = appsv1alpha1.ScaleStrategy{
+					PodToDelete: []string{podToDelete.Name},
+				}
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for selective delete pods finished")
+			Eventually(func() bool {
+				if err = tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return len(cls.Spec.ScaleStrategy.PodToDelete) == 0
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Check pod is deleted")
+			Eventually(func() bool {
+				pods, err = tester.ListPodsForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				for i := range pods {
+					if pods[i].Name == podToDelete.Name {
+						return false
+					}
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("selective scale in pods")
+			pods, err = tester.ListPodsForCollaSet(cls)
+			Expect(err).NotTo(HaveOccurred())
+			podToDelete = pods[0]
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.ScaleStrategy = appsv1alpha1.ScaleStrategy{
+					PodToDelete: []string{podToDelete.Name},
+				}
+				cls.Spec.Replicas = int32Pointer(2)
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for selective scale in pods finished")
+			Eventually(func() bool {
+				if err = tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return len(cls.Spec.ScaleStrategy.PodToDelete) == 0
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Check pod is scaled in")
+			Eventually(func() bool {
+				pods, err = tester.ListPodsForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				for i := range pods {
+					if pods[i].Name == podToDelete.Name {
+						return false
+					}
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+		})
+
+		framework.ConformanceIt("PVC retention policy with scale in pods", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 2, appsv1alpha1.UpdateStrategy{})
+			cls.Spec.ScaleStrategy.PersistentVolumeClaimRetentionPolicy = &appsv1alpha1.PersistentVolumeClaimRetentionPolicy{
+				WhenScaled: appsv1alpha1.RetainPersistentVolumeClaimRetentionPolicyType,
+			}
+			cls.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "pvc-test",
+					},
+					Spec: v1.PersistentVolumeClaimSpec{
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								"storage": resource.MustParse("100m"),
+							},
+						},
+						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+					},
+				},
+			}
+			cls.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
+				{
+					MountPath: "/path/to/mount",
+					Name:      "pvc-test",
+				},
+			}
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 2, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Wait for PVC provisioned")
+			pvcNames := sets.String{}
+			Eventually(func() bool {
+				pvcs, err := tester.ListPVCForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				if len(pvcs) != 2 {
+					return false
+				}
+				for i := range pvcs {
+					pvcNames.Insert(pvcs[i].Name)
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Scale in 1 replicas")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Replicas = int32Pointer(1)
+			})).NotTo(HaveOccurred())
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 1, 1, 1, 1, 1) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Check PVC is reserved")
+			Eventually(func() bool {
+				pvcs, err := tester.ListPVCForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				return len(pvcs) == 2
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Scale out 1 replicas")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Replicas = int32Pointer(2)
+			})).NotTo(HaveOccurred())
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 2, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Check PVC is retained")
+			Eventually(func() bool {
+				pvcs, err := tester.ListPVCForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				if len(pvcs) != 2 {
+					return false
+				}
+				for i := range pvcs {
+					if !pvcNames.Has(pvcs[i].Name) {
+						return false
+					}
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+		})
+	})
+
+	framework.KusionstackDescribe("CollaSet Updating", func() {
+
+		framework.ConformanceIt("in-place update images with same imageID", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 1, appsv1alpha1.UpdateStrategy{PodUpdatePolicy: appsv1alpha1.CollaSetInPlaceIfPossiblePodUpdateStrategyType})
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 1, 1, 1, 1, 1) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			pods, err := tester.ListPodsForCollaSet(cls)
+			Expect(err).NotTo(HaveOccurred())
+			oldPodUID := pods[0].UID
+			oldContainerStatus := pods[0].Status.ContainerStatuses[0]
+
+			By("Update image to nginxNew")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Template.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxNew)
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err = tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for all pods updated and ready")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 1, 1, 1, 1, 1) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Verify the PodUID not changed but containerID and imageID changed")
+			pods, err = tester.ListPodsForCollaSet(cls)
+			Expect(err).NotTo(HaveOccurred())
+			newPodUID := pods[0].UID
+			newContainerStatus := pods[0].Status.ContainerStatuses[0]
+
+			Expect(oldPodUID).Should(Equal(newPodUID))
+			Expect(newContainerStatus.ContainerID).NotTo(Equal(oldContainerStatus.ContainerID))
+			Expect(newContainerStatus.ImageID).NotTo(Equal(oldContainerStatus.ImageID))
+		})
+
+		framework.ConformanceIt("update pods by label", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 3, appsv1alpha1.UpdateStrategy{})
+			cls.Spec.UpdateStrategy = appsv1alpha1.UpdateStrategy{
+				RollingUpdate: &appsv1alpha1.RollingUpdateCollaSetStrategy{
+					ByLabel: &appsv1alpha1.ByLabel{},
+				},
+			}
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 3, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Update image to nginxNew but pods are not updated")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Template.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxNew)
+			})).NotTo(HaveOccurred())
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 0, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Label pod to trigger update")
+			pods, err := tester.ListPodsForCollaSet(cls)
+			Expect(err).NotTo(HaveOccurred())
+			podToUpdate := pods[0]
+			Expect(tester.UpdatePod(podToUpdate, func(pod *v1.Pod) {
+				pod.Labels[appsv1alpha1.CollaSetUpdateIndicateLabelKey] = "true"
+			})).NotTo(HaveOccurred())
+
+			By("Wait for update finished")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 1, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+		})
+
+		framework.ConformanceIt("PVC template update", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 2, appsv1alpha1.UpdateStrategy{})
+			cls.Spec.VolumeClaimTemplates = []v1.PersistentVolumeClaim{
+				{
+					ObjectMeta: metav1.ObjectMeta{
+						Name: "pvc-test",
+					},
+					Spec: v1.PersistentVolumeClaimSpec{
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								"storage": resource.MustParse("100m"),
+							},
+						},
+						AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
+					},
+				},
+			}
+			cls.Spec.Template.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
+				{
+					MountPath: "/path/to/mount",
+					Name:      "pvc-test",
+				},
+			}
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 2, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Wait for PVC provisioned")
+			oldPvcNames := sets.String{}
+			oldRevision := cls.Status.UpdatedRevision
+			Eventually(func() bool {
+				pvcs, err := tester.ListPVCForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				if len(pvcs) != 2 {
+					return false
+				}
+				for i := range pvcs {
+					oldPvcNames.Insert(pvcs[i].Name)
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Update PVC template")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.VolumeClaimTemplates[0].Spec.Resources = v1.ResourceRequirements{
+					Requests: v1.ResourceList{
+						"storage": resource.MustParse("200m"),
+					},
+				}
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Wait for PVCs and pods recreated")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Status.UpdatedRevision != oldRevision
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 2, 2, 2, 2, 2) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Check new PVCs provisioned")
+			Eventually(func() bool {
+				pvcs, err := tester.ListPVCForCollaSet(cls)
+				if err != nil {
+					return false
+				}
+				if len(pvcs) != 2 {
+					return false
+				}
+				for i := range pvcs {
+					if oldPvcNames.Has(pvcs[i].Name) {
+						return false
+					}
+					if pvcs[i].Spec.Resources.Requests["storage"] != resource.MustParse("200m") {
+						return false
+					}
+				}
+				return true
+			}, 30*time.Second, 3*time.Second).Should(Equal(true))
+		})
+
+		framework.ConformanceIt("replace update by partition", func() {
+			cls := tester.NewCollaSet("collaset-"+randStr, 3, appsv1alpha1.UpdateStrategy{PodUpdatePolicy: appsv1alpha1.CollaSetReplacePodUpdateStrategyType})
+			Expect(tester.CreateCollaSet(cls)).NotTo(HaveOccurred())
+
+			By("Wait for status replicas satisfied")
+			Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 3, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+
+			By("Update image to nginxNew")
+			Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+				cls.Spec.Template.Spec.Containers[0].Image = imageutils.GetE2EImage(imageutils.NginxNew)
+				cls.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateCollaSetStrategy{
+					ByPartition: &appsv1alpha1.ByPartition{
+						Partition: int32Pointer(3),
+					},
+				}
+			})).NotTo(HaveOccurred())
+
+			By("Wait for CollaSet reconciled")
+			Eventually(func() bool {
+				if err := tester.GetCollaSet(cls); err != nil {
+					return false
+				}
+				return cls.Generation == cls.Status.ObservedGeneration
+			}, 10*time.Second, 3*time.Second).Should(Equal(true))
+
+			By("Update CollaSet by partition")
+			for _, partition := range []int32{3, 2, 1, 0} {
+				Expect(tester.UpdateCollaSet(cls, func(cls *appsv1alpha1.CollaSet) {
+					cls.Spec.UpdateStrategy.RollingUpdate = &appsv1alpha1.RollingUpdateCollaSetStrategy{
+						ByPartition: &appsv1alpha1.ByPartition{
+							Partition: &partition,
+						},
+					}
+				})).NotTo(HaveOccurred())
+				Eventually(func() bool {
+					if err := tester.GetCollaSet(cls); err != nil {
+						return false
+					}
+					return cls.Generation == cls.Status.ObservedGeneration
+				}, 10*time.Second, 3*time.Second).Should(Equal(true))
+				Eventually(func() error { return tester.ExpectedStatusReplicas(cls, 3, 3, 3, 3-partition, 3) }, 30*time.Second, 3*time.Second).ShouldNot(HaveOccurred())
+			}
+		})
+	})
+
+})
+
+func int32Pointer(val int32) *int32 {
+	return &val
+}
diff --git a/test/e2e/e2e.sh b/test/e2e/e2e.sh
deleted file mode 100644
index ed9aafe3..00000000
--- a/test/e2e/e2e.sh
+++ /dev/null
@@ -1,90 +0,0 @@
-#!/usr/bin/env bash
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-readonly KIND_VERSION=v0.17.0
-readonly CLUSTER_NAME=operating
-
-cleanup() {
-  echo 'Removing kind cluster...'
-  kind delete cluster --name="$CLUSTER_NAME"
-
-  echo 'Done!'
-}
-
-create_kind_cluster() {
-  echo 'creating kind cluster...'
-
-  # kind-darwin-amd64, kind-linux-amd64
-  curl -Lo ./kind "https://github.com/kubernetes-sigs/kind/releases/download/$KIND_VERSION/kind-linux-amd64"
-  chmod +x ./kind
-  sudo mv ./kind /usr/local/bin/kind
-
-  kind create cluster --name "$CLUSTER_NAME" --config ./test/e2e/kind-config.yaml --image "$KIND_IMAGE" --wait 300s
-
-  echo 'export kubeconfig...'
-  export KUBECONFIG=~/.kube/config
-
-  echo "installing kubectl..."
-  curl -Lo ./kubectl "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/darwin/amd64/kubectl"
-  chmod +x ./kubectl
-  sudo mv ./kubectl /usr/local/bin/kubectl
-
-  kubectl cluster-info
-  echo
-
-  kubectl get nodes
-  echo
-
-  echo 'Cluster ready!'
-  echo
-}
-
-install_controllers() {
-  echo "docker pull image"
-  docker pull $IMAGE
-  kind load docker-image $IMAGE --name "$CLUSTER_NAME"
-
-  echo "installing kustomize..."
-  make kustomize
-  KUSTOMIZE=$(pwd)/bin/kustomize
-
-  echo "preparing kustomize yaml"
-  (cd ./config/manager && $KUSTOMIZE edit set image controller=$IMAGE)
-
-  echo "installing controller..."
-  $KUSTOMIZE build ./config/default | sed -e 's/imagePullPolicy: Always/imagePullPolicy: IfNotPresent/g' | kubectl apply -f -
-}
-
-install_deps() {
-  if ! command -v sudo &> /dev/null
-  then
-    echo "command sudo could not be found and install it"
-    yum install -y sudo
-  fi
-
-  if ! command -v docker &> /dev/null
-  then
-    echo "command docker could not be found and install it"
-    yum install -y docker
-  fi
-
-  sudo systemctl start docker
-}
-
-main() {
-  if [ -z "$IMAGE" ]; then
-    echo "no image provided by env var IMAGE"
-    exit 1
-  fi
-
-  install_deps
-  create_kind_cluster
-  trap cleanup EXIT
-
-  install_controllers
-}
-
-main
\ No newline at end of file
diff --git a/test/e2e/framework/collaset_util.go b/test/e2e/framework/collaset_util.go
new file mode 100644
index 00000000..2fb56145
--- /dev/null
+++ b/test/e2e/framework/collaset_util.go
@@ -0,0 +1,184 @@
+/*
+Copyright 2024 The KusionStack Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package framework
+
+import (
+	"context"
+	"fmt"
+	"sort"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/client-go/util/retry"
+	imageutils "k8s.io/kubernetes/test/utils/image"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	appsv1alpha1 "kusionstack.io/operating/apis/apps/v1alpha1"
+)
+
+type CollaSetTester struct {
+	clientSet clientset.Interface
+	client    client.Client
+	ns        string
+}
+
+func NewCollaSetTester(clientSet clientset.Interface, client client.Client, ns string) *CollaSetTester {
+	return &CollaSetTester{
+		clientSet: clientSet,
+		client:    client,
+		ns:        ns,
+	}
+}
+
+func (t *CollaSetTester) NewCollaSet(name string, replicas int32, updateStrategy appsv1alpha1.UpdateStrategy) *appsv1alpha1.CollaSet {
+	return &appsv1alpha1.CollaSet{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: t.ns,
+			Name:      name,
+		},
+		Spec: appsv1alpha1.CollaSetSpec{
+			Replicas:       &replicas,
+			Selector:       &metav1.LabelSelector{MatchLabels: map[string]string{"owner": name}},
+			UpdateStrategy: updateStrategy,
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{"owner": name},
+				},
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
+						{
+							Name:  "nginx",
+							Image: imageutils.GetE2EImage(imageutils.Nginx),
+							Env: []v1.EnvVar{
+								{Name: "test", Value: "foo"},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func (t *CollaSetTester) CreateCollaSet(cls *appsv1alpha1.CollaSet) error {
+	return t.client.Create(context.TODO(), cls)
+}
+
+func (t *CollaSetTester) GetCollaSet(cls *appsv1alpha1.CollaSet) error {
+	return t.client.Get(context.TODO(), types.NamespacedName{Namespace: cls.Namespace, Name: cls.Name}, cls)
+}
+
+func (t *CollaSetTester) UpdateCollaSet(cls *appsv1alpha1.CollaSet, fn func(cls *appsv1alpha1.CollaSet)) error {
+	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		err := t.GetCollaSet(cls)
+		if err != nil {
+			return err
+		}
+
+		fn(cls)
+		err = t.client.Update(context.TODO(), cls)
+		return err
+	})
+}
+
+func (t *CollaSetTester) ListPodsForCollaSet(cls *appsv1alpha1.CollaSet) (pods []*v1.Pod, err error) {
+	podList := &v1.PodList{}
+	if err = t.client.List(context.TODO(), podList); err != nil {
+		return nil, err
+	}
+	for i := range podList.Items {
+		pod := &podList.Items[i]
+		// ignore deleting pod
+		if pod.DeletionTimestamp != nil {
+			continue
+		}
+		if owner := metav1.GetControllerOf(pod); owner != nil && owner.Name == cls.Name {
+			pods = append(pods, pod)
+		}
+	}
+	sort.SliceStable(pods, func(i, j int) bool {
+		return pods[i].Name < pods[j].Name
+	})
+	return
+}
+
+func (t *CollaSetTester) ListPVCForCollaSet(cls *appsv1alpha1.CollaSet) (pvcs []*v1.PersistentVolumeClaim, err error) {
+	pvcList := &v1.PersistentVolumeClaimList{}
+	err = t.client.List(context.TODO(), pvcList, client.InNamespace(cls.Namespace))
+	if err != nil {
+		return nil, err
+	}
+	for i := range pvcList.Items {
+		pvc := &pvcList.Items[i]
+		if pvc.DeletionTimestamp.IsZero() {
+			pvcs = append(pvcs, pvc)
+		}
+	}
+	sort.SliceStable(pvcs, func(i, j int) bool {
+		return pvcs[i].Name < pvcs[j].Name
+	})
+	return
+}
+
+func (t *CollaSetTester) DeleteCollaSet(cls *appsv1alpha1.CollaSet) error {
+	return t.client.Delete(context.TODO(), cls)
+}
+
+func (t *CollaSetTester) UpdatePod(pod *v1.Pod, fn func(pod *v1.Pod)) error {
+	return retry.RetryOnConflict(retry.DefaultBackoff, func() error {
+		err := t.client.Get(context.TODO(), types.NamespacedName{Namespace: pod.Namespace, Name: pod.Name}, pod)
+		if err != nil {
+			return err
+		}
+
+		fn(pod)
+		err = t.client.Update(context.TODO(), pod)
+		return err
+	})
+}
+
+func (t *CollaSetTester) ExpectedStatusReplicas(cls *appsv1alpha1.CollaSet, replicas, readyReplicas, availableReplicas, updatedReplicas, totalReplicas int32) error {
+	if err := t.client.Get(context.TODO(), types.NamespacedName{Namespace: cls.Namespace, Name: cls.Name}, cls); err != nil {
+		return err
+	}
+
+	if cls.Status.Replicas != replicas {
+		return fmt.Errorf("replicas got %d, expected %d", cls.Status.Replicas, replicas)
+	}
+
+	if cls.Status.ReadyReplicas != readyReplicas {
+		return fmt.Errorf("readyReplicas got %d, expected %d", cls.Status.ReadyReplicas, readyReplicas)
+	}
+
+	if cls.Status.AvailableReplicas != availableReplicas {
+		return fmt.Errorf("availableReplicas got %d, expected %d", cls.Status.AvailableReplicas, availableReplicas)
+	}
+
+	if cls.Status.UpdatedReplicas != updatedReplicas {
+		return fmt.Errorf("updatedReplicas got %d, expected %d", cls.Status.UpdatedReplicas, updatedReplicas)
+	}
+
+	if pods, err := t.ListPodsForCollaSet(cls); err != nil {
+		return err
+	} else if len(pods) != int(totalReplicas) {
+		return fmt.Errorf("totalReplicas got %d, expected %d", len(pods), totalReplicas)
+	}
+
+	return nil
+}
diff --git a/test/e2e/kind-config.yaml b/test/e2e/kind-config.yaml
deleted file mode 100644
index 54eb1d01..00000000
--- a/test/e2e/kind-config.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-kind: Cluster
-apiVersion: kind.x-k8s.io/v1alpha4
-nodes:
-  - role: control-plane
-  - role: worker
\ No newline at end of file