From 0ee9be39bbbb4ba8f1cc009b6553b36a088e697d Mon Sep 17 00:00:00 2001
From: rayywu
Date: Tue, 23 May 2023 15:19:33 +0800
Subject: [PATCH 1/4] Add UT for pkg/controllers/status/crb_status_controller.go

Signed-off-by: rayywu
---
 .../status/crb_status_controller_test.go | 205 ++++++++++++++++++
 1 file changed, 205 insertions(+)
 create mode 100644 pkg/controllers/status/crb_status_controller_test.go

diff --git a/pkg/controllers/status/crb_status_controller_test.go b/pkg/controllers/status/crb_status_controller_test.go
new file mode 100644
index 000000000000..88feaff69664
--- /dev/null
+++ b/pkg/controllers/status/crb_status_controller_test.go
@@ -0,0 +1,205 @@
+package status
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/assert"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/types"
+    dynamicfake "k8s.io/client-go/dynamic/fake"
+    "k8s.io/client-go/kubernetes/scheme"
+    "k8s.io/client-go/tools/record"
+    controllerruntime "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+    "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
+    "github.com/karmada-io/karmada/pkg/util/gclient"
+)
+
+func generateCRBStatusController() *CRBStatusController {
+    stopCh := make(chan struct{})
+    defer close(stopCh)
+    dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
+        &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"}})
+    m := genericmanager.NewSingleClusterInformerManager(dynamicClient, 0, stopCh)
+    m.Lister(corev1.SchemeGroupVersion.WithResource("pods"))
+    m.Start()
+    m.WaitForCacheSync()
+
+    c := &CRBStatusController{
+        Client:          fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
+        DynamicClient:   dynamicClient,
+        InformerManager: m,
+        RESTMapper: func() meta.RESTMapper {
+            m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion})
+            m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace)
+            return m
+        }(),
+        EventRecorder: &record.FakeRecorder{},
+    }
+    return c
+}
+
+func TestCRBStatusController_Reconcile(t *testing.T) {
+    preTime := metav1.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
+    tests := []struct {
+        name        string
+        binding     *workv1alpha2.ClusterResourceBinding
+        expectRes   controllerruntime.Result
+        expectError bool
+    }{
+        {
+            name: "failed in syncBindingStatus",
+            binding: &workv1alpha2.ClusterResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "v1",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                },
+            },
+            expectRes:   controllerruntime.Result{},
+            expectError: false,
+        },
+        {
+            name:        "binding not found in client",
+            expectRes:   controllerruntime.Result{},
+            expectError: false,
+        },
+        {
+            name: "binding with deletion timestamp is ignored",
+            binding: &workv1alpha2.ClusterResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:              "binding",
+                    Namespace:         "default",
+                    DeletionTimestamp: &preTime,
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "v1",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                },
+            },
+            expectRes:   controllerruntime.Result{},
+            expectError: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            c := generateCRBStatusController()
+
+            // Prepare req
+            req := controllerruntime.Request{
+                NamespacedName: types.NamespacedName{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+            }
+
+            // Prepare binding and create it in client
+            if tt.binding != nil {
+                if err := c.Client.Create(context.Background(), tt.binding); err != nil {
+                    t.Fatalf("Failed to create binding: %v", err)
+                }
+            }
+
+            res, err := c.Reconcile(context.Background(), req)
+            assert.Equal(t, tt.expectRes, res)
+            if tt.expectError {
+                assert.Error(t, err)
+            } else {
+                assert.NoError(t, err)
+            }
+        })
+    }
+}
+
+func TestCRBStatusController_syncBindingStatus(t *testing.T) {
+    tests := []struct {
+        name                   string
+        resource               workv1alpha2.ObjectReference
+        podNameInDynamicClient string
+        resourceExistInClient  bool
+        expectedError          bool
+    }{
+        {
+            name: "failed in FetchResourceTemplate, err is NotFound",
+            resource: workv1alpha2.ObjectReference{
+                APIVersion: "v1",
+                Kind:       "Pod",
+                Namespace:  "default",
+                Name:       "pod",
+            },
+            podNameInDynamicClient: "pod1",
+            resourceExistInClient:  true,
+            expectedError:          false,
+        },
+        {
+            name:                   "failed in FetchResourceTemplate, err is not NotFound",
+            resource:               workv1alpha2.ObjectReference{},
+            podNameInDynamicClient: "pod",
+            resourceExistInClient:  true,
+            expectedError:          true,
+        },
+        {
+            name: "failed in AggregateClusterResourceBindingWorkStatus",
+            resource: workv1alpha2.ObjectReference{
+                APIVersion: "v1",
+                Kind:       "Pod",
+                Namespace:  "default",
+                Name:       "pod",
+            },
+            podNameInDynamicClient: "pod",
+            resourceExistInClient:  false,
+            expectedError:          true,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            c := generateCRBStatusController()
+            c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme,
+                &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}})
+
+            binding := &workv1alpha2.ClusterResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: tt.resource,
+                },
+            }
+
+            if tt.resourceExistInClient {
+                if err := c.Client.Create(context.Background(), binding); err != nil {
+                    t.Fatalf("Failed to create binding: %v", err)
+                }
+            }
+
+            err := c.syncBindingStatus(binding)
+
+            if tt.expectedError {
+                assert.Error(t, err)
+            } else {
+                assert.NoError(t, err)
+            }
+        })
+    }
+}

From 8e0f186302f347010be38a1341656f8cd746acb4 Mon Sep 17 00:00:00 2001
From: rayywu
Date: Thu, 1 Jun 2023 20:41:18 +0800
Subject: [PATCH 2/4] Add UT for pkg/controllers/status/rb_status_controller.go

Signed-off-by: rayywu
---
 .../status/rb_status_controller_test.go | 203 ++++++++++++++++++
 1 file changed, 203 insertions(+)
 create mode 100644 pkg/controllers/status/rb_status_controller_test.go

diff --git a/pkg/controllers/status/rb_status_controller_test.go b/pkg/controllers/status/rb_status_controller_test.go
new file mode 100644
index 000000000000..7ae89a0f6a75
--- /dev/null
+++ b/pkg/controllers/status/rb_status_controller_test.go
@@ -0,0 +1,203 @@
+package status
+
+import (
+    "context"
+    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+    "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
+    "github.com/karmada-io/karmada/pkg/util/gclient"
+    "github.com/stretchr/testify/assert"
+    corev1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/meta"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/types"
+    dynamicfake "k8s.io/client-go/dynamic/fake"
"k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/record" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "testing" + "time" +) + +func generateRBStatusController() *RBStatusController { + stopCh := make(chan struct{}) + defer close(stopCh) + dynamicClient := dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "pod1", Namespace: "default"}}) + m := genericmanager.NewSingleClusterInformerManager(dynamicClient, 0, stopCh) + m.Lister(corev1.SchemeGroupVersion.WithResource("pods")) + m.Start() + m.WaitForCacheSync() + + c := &RBStatusController{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(), + DynamicClient: dynamicClient, + InformerManager: m, + RESTMapper: func() meta.RESTMapper { + m := meta.NewDefaultRESTMapper([]schema.GroupVersion{corev1.SchemeGroupVersion}) + m.Add(corev1.SchemeGroupVersion.WithKind("Pod"), meta.RESTScopeNamespace) + return m + }(), + EventRecorder: &record.FakeRecorder{}, + } + return c +} + +func TestRBStatusController_Reconcile(t *testing.T) { + preTime := metav1.Date(2023, 0, 0, 0, 0, 0, 0, time.UTC) + tests := []struct { + name string + binding *workv1alpha2.ResourceBinding + expectRes controllerruntime.Result + expectError bool + }{ + { + name: "failed in syncBindingStatus", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + }, + expectRes: controllerruntime.Result{}, + expectError: false, + }, + { + name: "binding not found in client", + expectRes: controllerruntime.Result{}, + expectError: false, + }, + { + name: "failed in syncBindingStatus", + binding: &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + DeletionTimestamp: &preTime, + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + }, + expectRes: controllerruntime.Result{}, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := generateRBStatusController() + + // Prepare req + req := controllerruntime.Request{ + NamespacedName: types.NamespacedName{ + Name: "binding", + Namespace: "default", + }, + } + + // Prepare binding and create it in client + if tt.binding != nil { + if err := c.Client.Create(context.Background(), tt.binding); err != nil { + t.Fatalf("Failed to create binding: %v", err) + } + } + + res, err := c.Reconcile(context.Background(), req) + assert.Equal(t, tt.expectRes, res) + if tt.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func TestRBStatusController_syncBindingStatus(t *testing.T) { + tests := []struct { + name string + resource workv1alpha2.ObjectReference + podNameInDynamicClient string + resourceExistInClient bool + expectedError bool + }{ + { + name: "failed in FetchResourceTemplate, err is NotFound", + resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + podNameInDynamicClient: "pod1", + resourceExistInClient: true, + expectedError: false, + }, + { + name: "failed in FetchResourceTemplate, err is not NotFound", + resource: workv1alpha2.ObjectReference{}, + 
podNameInDynamicClient: "pod", + resourceExistInClient: true, + expectedError: true, + }, + { + name: "failed in AggregateClusterResourceBindingWorkStatus", + resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + podNameInDynamicClient: "pod", + resourceExistInClient: false, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := generateRBStatusController() + c.DynamicClient = dynamicfake.NewSimpleDynamicClient(scheme.Scheme, + &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: tt.podNameInDynamicClient, Namespace: "default"}}) + + binding := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: tt.resource, + }, + } + + if tt.resourceExistInClient { + if err := c.Client.Create(context.Background(), binding); err != nil { + t.Fatalf("Failed to create binding: %v", err) + } + } + + err := c.syncBindingStatus(binding) + + if tt.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} From e97d29500b24d09ff57b8f48a62d042f67dca9f3 Mon Sep 17 00:00:00 2001 From: rayywu Date: Thu, 1 Jun 2023 21:07:47 +0800 Subject: [PATCH 3/4] Add UT for pkg/controllers/applicationfailover/common.go Signed-off-by: rayywu --- .../applicationfailover/common_test.go | 78 +++++++++++++++++++ .../status/rb_status_controller_test.go | 12 +-- 2 files changed, 85 insertions(+), 5 deletions(-) diff --git a/pkg/controllers/applicationfailover/common_test.go b/pkg/controllers/applicationfailover/common_test.go index 84a32d4897e2..da16ac736f16 100644 --- a/pkg/controllers/applicationfailover/common_test.go +++ b/pkg/controllers/applicationfailover/common_test.go @@ -4,9 +4,87 @@ import ( "reflect" "testing" + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" ) +func TestNewWorkloadUnhealthyMap(t *testing.T) { + m := newWorkloadUnhealthyMap() + expected := &workloadUnhealthyMap{ + workloadUnhealthy: make(map[types.NamespacedName]map[string]metav1.Time), + } + assert.Equal(t, expected, m) +} + +func TestTimeStampProcess(t *testing.T) { + key := types.NamespacedName{ + Namespace: "default", + Name: "test", + } + cluster := "cluster-1" + + m := newWorkloadUnhealthyMap() + m.setTimeStamp(key, cluster) + res := m.hasWorkloadBeenUnhealthy(key, cluster) + assert.Equal(t, true, res) + + time := m.getTimeStamp(key, cluster) + assert.NotEmpty(t, time) + + m.delete(key) + res = m.hasWorkloadBeenUnhealthy(key, cluster) + assert.Equal(t, false, res) +} + +func TestWorkloadUnhealthyMap_deleteIrrelevantClusters(t *testing.T) { + cluster1 := "cluster-1" + cluster2 := "cluster-2" + cluster3 := "cluster-3" + t.Run("normal case", func(t *testing.T) { + key := types.NamespacedName{ + Namespace: "default", + Name: "test", + } + + m := newWorkloadUnhealthyMap() + + m.setTimeStamp(key, cluster1) + m.setTimeStamp(key, cluster2) + m.setTimeStamp(key, cluster3) + + allClusters := sets.New[string](cluster2, cluster3) + healthyClusters := []string{cluster3} + + m.deleteIrrelevantClusters(key, allClusters, healthyClusters) + res1 := m.hasWorkloadBeenUnhealthy(key, cluster1) + assert.Equal(t, false, res1) + res2 := m.hasWorkloadBeenUnhealthy(key, cluster2) + assert.Equal(t, true, res2) + res3 := 
+        assert.Equal(t, false, res3)
+    })
+
+    t.Run("unhealthyClusters is nil", func(t *testing.T) {
+        key := types.NamespacedName{
+            Namespace: "default",
+            Name:      "test",
+        }
+
+        m := newWorkloadUnhealthyMap()
+
+        allClusters := sets.New[string](cluster2, cluster3)
+        healthyClusters := []string{cluster3}
+
+        m.deleteIrrelevantClusters(key, allClusters, healthyClusters)
+        res := m.hasWorkloadBeenUnhealthy(key, cluster2)
+        assert.Equal(t, false, res)
+    })
+}
+
 func TestDistinguishUnhealthyClustersWithOthers(t *testing.T) {
     tests := []struct {
         name string
diff --git a/pkg/controllers/status/rb_status_controller_test.go b/pkg/controllers/status/rb_status_controller_test.go
index 7ae89a0f6a75..f0b14e544def 100644
--- a/pkg/controllers/status/rb_status_controller_test.go
+++ b/pkg/controllers/status/rb_status_controller_test.go
@@ -2,9 +2,9 @@ package status
 
 import (
     "context"
-    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
-    "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
-    "github.com/karmada-io/karmada/pkg/util/gclient"
+    "testing"
+    "time"
+
     "github.com/stretchr/testify/assert"
     corev1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/meta"
@@ -16,8 +16,10 @@ import (
     "k8s.io/client-go/tools/record"
     controllerruntime "sigs.k8s.io/controller-runtime"
     "sigs.k8s.io/controller-runtime/pkg/client/fake"
-    "testing"
-    "time"
+
+    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+    "github.com/karmada-io/karmada/pkg/util/fedinformer/genericmanager"
+    "github.com/karmada-io/karmada/pkg/util/gclient"
 )
 
 func generateRBStatusController() *RBStatusController {

From 7864e11267ac5d8e99d0102bfcedc0b52637432a Mon Sep 17 00:00:00 2001
From: rayywu
Date: Fri, 2 Jun 2023 22:13:50 +0800
Subject: [PATCH 4/4] Add UT for crb_application_failover_controller.go and
 rb_application_failover_controller.go

Signed-off-by: rayywu
---
 .../applicationfailover/common_test.go        |   9 -
 ...rb_application_failover_controller_test.go | 377 ++++++++++++++++++
 ...rb_application_failover_controller_test.go | 358 +++++++++++++++++
 3 files changed, 735 insertions(+), 9 deletions(-)
 create mode 100644 pkg/controllers/applicationfailover/crb_application_failover_controller_test.go
 create mode 100644 pkg/controllers/applicationfailover/rb_application_failover_controller_test.go

diff --git a/pkg/controllers/applicationfailover/common_test.go b/pkg/controllers/applicationfailover/common_test.go
index da16ac736f16..58714a62469d 100644
--- a/pkg/controllers/applicationfailover/common_test.go
+++ b/pkg/controllers/applicationfailover/common_test.go
@@ -5,21 +5,12 @@ import (
     "testing"
 
     "github.com/stretchr/testify/assert"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/sets"
 
     workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
 )
 
-func TestNewWorkloadUnhealthyMap(t *testing.T) {
-    m := newWorkloadUnhealthyMap()
-    expected := &workloadUnhealthyMap{
-        workloadUnhealthy: make(map[types.NamespacedName]map[string]metav1.Time),
-    }
-    assert.Equal(t, expected, m)
-}
-
 func TestTimeStampProcess(t *testing.T) {
     key := types.NamespacedName{
         Namespace: "default",
         Name:      "test",
     }
     cluster := "cluster-1"
diff --git a/pkg/controllers/applicationfailover/crb_application_failover_controller_test.go b/pkg/controllers/applicationfailover/crb_application_failover_controller_test.go
new file mode 100644
index 000000000000..51398b26c92e
--- /dev/null
+++ b/pkg/controllers/applicationfailover/crb_application_failover_controller_test.go
@@ -0,0 +1,377 @@
+package applicationfailover
+
+import (
+    "context"
+    "testing"
+    "time"
+
+    "github.com/stretchr/testify/assert"
+    batchv1 "k8s.io/api/batch/v1"
+    corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
+    "k8s.io/apimachinery/pkg/types"
+    "k8s.io/apimachinery/pkg/util/sets"
+    "k8s.io/client-go/tools/record"
+    controllerruntime "sigs.k8s.io/controller-runtime"
+    "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+    policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1"
+    workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
+    "github.com/karmada-io/karmada/pkg/util/gclient"
+    "github.com/karmada-io/karmada/pkg/util/helper"
+)
+
+func generateCRBApplicationFailoverController() *CRBApplicationFailoverController {
+    m := newWorkloadUnhealthyMap()
+    c := &CRBApplicationFailoverController{
+        Client:               fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(),
+        EventRecorder:        &record.FakeRecorder{},
+        workloadUnhealthyMap: m,
+    }
+    return c
+}
+
+func TestCRBApplicationFailoverController_Reconcile(t *testing.T) {
+    t.Run("failed in clusterResourceBindingFilter", func(t *testing.T) {
+        binding := &workv1alpha2.ClusterResourceBinding{
+            ObjectMeta: metav1.ObjectMeta{
+                Name:      "binding",
+                Namespace: "default",
+            },
+            Spec: workv1alpha2.ResourceBindingSpec{
+                Resource: workv1alpha2.ObjectReference{
+                    APIVersion: "v1",
+                    Kind:       "Pod",
+                    Namespace:  "default",
+                    Name:       "pod",
+                },
+            },
+        }
+        c := generateCRBApplicationFailoverController()
+
+        // Prepare req
+        req := controllerruntime.Request{
+            NamespacedName: types.NamespacedName{
+                Name:      "binding",
+                Namespace: "default",
+            },
+        }
+
+        if err := c.Client.Create(context.Background(), binding); err != nil {
+            t.Fatalf("Failed to create binding: %v", err)
+        }
+
+        res, err := c.Reconcile(context.Background(), req)
+        assert.Equal(t, controllerruntime.Result{}, res)
+        assert.NoError(t, err)
+    })
+
+    t.Run("failed in c.Client.Get", func(t *testing.T) {
+        c := generateCRBApplicationFailoverController()
+
+        // Prepare req
+        req := controllerruntime.Request{
+            NamespacedName: types.NamespacedName{
+                Name:      "binding",
+                Namespace: "default",
+            },
+        }
+
+        res, err := c.Reconcile(context.Background(), req)
+        assert.Equal(t, controllerruntime.Result{}, res)
+        assert.NoError(t, err)
+    })
+}
+
+func TestCRBApplicationFailoverController_detectFailure(t *testing.T) {
+    cluster1 := "cluster1"
+    cluster2 := "cluster2"
+    key := types.NamespacedName{
+        Namespace: "default",
+        Name:      "test",
+    }
+
+    t.Run("hasWorkloadBeenUnhealthy return false", func(t *testing.T) {
+        clusters := []string{cluster1, cluster2}
+        tolerationSeconds := int32(1)
+
+        c := generateCRBApplicationFailoverController()
+        duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
+        assert.Equal(t, tolerationSeconds, duration)
+        assert.Equal(t, []string(nil), needEvictClusters)
+    })
+
+    t.Run("more than the tolerance time", func(t *testing.T) {
+        clusters := []string{cluster1, cluster2}
+        tolerationSeconds := int32(1)
+
+        c := generateCRBApplicationFailoverController()
+        c.workloadUnhealthyMap.setTimeStamp(key, cluster1)
+        time.Sleep(2 * time.Second)
+        duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
+        assert.Equal(t, tolerationSeconds, duration)
+        assert.Equal(t, []string{"cluster1"}, needEvictClusters)
+    })
+
+    t.Run("less than the tolerance time", func(t *testing.T) {
+        clusters := []string{cluster1, cluster2}
+        tolerationSeconds := int32(100)
+
+        c := generateCRBApplicationFailoverController()
+        c.workloadUnhealthyMap.setTimeStamp(key, cluster1)
+        duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
+        assert.Equal(t, tolerationSeconds, duration)
+        assert.Equal(t, []string(nil), needEvictClusters)
+    })
+
+    t.Run("final duration is 0", func(t *testing.T) {
+        clusters := []string{}
+        tolerationSeconds := int32(100)
+
+        c := generateCRBApplicationFailoverController()
+        duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
+        assert.Equal(t, int32(0), duration)
+        assert.Equal(t, []string(nil), needEvictClusters)
+    })
+}
+
+func TestCRBApplicationFailoverController_syncBinding(t *testing.T) {
+    tolerationSeconds := int32(5)
+    c := generateCRBApplicationFailoverController()
+    binding := &workv1alpha2.ClusterResourceBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "binding",
+            Namespace: "default",
+        },
+        Spec: workv1alpha2.ResourceBindingSpec{
+            Resource: workv1alpha2.ObjectReference{
+                APIVersion: "v1",
+                Kind:       "Pod",
+                Namespace:  "default",
+                Name:       "pod",
+            },
+            Failover: &policyv1alpha1.FailoverBehavior{
+                Application: &policyv1alpha1.ApplicationFailoverBehavior{
+                    DecisionConditions: policyv1alpha1.DecisionConditions{
+                        TolerationSeconds: &tolerationSeconds,
+                    },
+                },
+            },
+            Clusters: []workv1alpha2.TargetCluster{
+                {
+                    Name:     "member1",
+                    Replicas: 1,
+                },
+                {
+                    Name:     "member1",
+                    Replicas: 2,
+                },
+            },
+        },
+        Status: workv1alpha2.ResourceBindingStatus{
+            AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
+                {
+                    ClusterName: "member1",
+                    Health:      workv1alpha2.ResourceHealthy,
+                },
+                {
+                    ClusterName: "member2",
+                    Health:      workv1alpha2.ResourceUnhealthy,
+                },
+            },
+        },
+    }
+
+    dur, err := c.syncBinding(binding)
+    assert.Equal(t, 5*time.Second, dur)
+    assert.NoError(t, err)
+}
+
+func TestCRBApplicationFailoverController_evictBinding(t *testing.T) {
+    tests := []struct {
+        name        string
+        purgeMode   policyv1alpha1.PurgeMode
+        expectError bool
+    }{
+        {
+            name:        "PurgeMode is Graciously",
+            purgeMode:   policyv1alpha1.Graciously,
+            expectError: false,
+        },
+        {
+            name:        "PurgeMode is Never",
+            purgeMode:   policyv1alpha1.Never,
+            expectError: false,
+        },
+        {
+            name:        "PurgeMode is Immediately",
+            purgeMode:   policyv1alpha1.Immediately,
+            expectError: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            c := generateCRBApplicationFailoverController()
+            binding := &workv1alpha2.ClusterResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "v1",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                    Failover: &policyv1alpha1.FailoverBehavior{
+                        Application: &policyv1alpha1.ApplicationFailoverBehavior{
+                            PurgeMode: tt.purgeMode,
+                        },
+                    },
+                },
+            }
+            clusters := []string{"member1", "member2"}
+            err := c.evictBinding(binding, clusters)
+            if tt.expectError {
+                assert.Error(t, err)
+            } else {
+                assert.NoError(t, err)
+            }
+        })
+    }
+}
+
+func TestCRBApplicationFailoverController_updateBinding(t *testing.T) {
+    binding := &workv1alpha2.ClusterResourceBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "binding",
+            Namespace: "default",
+        },
+        Spec: workv1alpha2.ResourceBindingSpec{
+            Resource: workv1alpha2.ObjectReference{
+                APIVersion: "v1",
+                Kind:       "Pod",
+                Namespace:  "default",
+                Name:       "pod",
+            },
+        },
+    }
+    allClusters := sets.New("member1", "member2", "member3")
+    needEvictClusters := []string{"member1", "member2"}
"member2"} + + c := generateCRBApplicationFailoverController() + + t.Run("failed when c.Update", func(t *testing.T) { + err := c.updateBinding(binding, allClusters, needEvictClusters) + assert.Error(t, err) + }) + + t.Run("normal case", func(t *testing.T) { + if err := c.Client.Create(context.Background(), binding); err != nil { + t.Fatalf("Failed to create binding: %v", err) + } + err := c.updateBinding(binding, allClusters, needEvictClusters) + assert.NoError(t, err) + }) +} + +func generateRaw() *runtime.RawExtension { + testTime := time.Now() + testV1time := metav1.NewTime(testTime) + statusMap := map[string]interface{}{ + "active": 0, + "succeeded": 1, + "startTime": testV1time, + "completionTime": testV1time, + "failed": 0, + "conditions": []batchv1.JobCondition{{Type: batchv1.JobComplete, Status: corev1.ConditionTrue}}, + } + raw, _ := helper.BuildStatusRawExtension(statusMap) + return raw +} + +func TestCRBApplicationFailoverController_clusterResourceBindingFilter(t *testing.T) { + tests := []struct { + name string + binding *workv1alpha2.ClusterResourceBinding + expectRes bool + }{ + { + name: "crb.Spec.Failover and crb.Spec.Failover.Application is nil", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + }, + expectRes: false, + }, + { + name: "crb.Status.AggregatedStatus is 0", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + Failover: &policyv1alpha1.FailoverBehavior{ + Application: &policyv1alpha1.ApplicationFailoverBehavior{}, + }, + }, + }, + expectRes: false, + }, + { + name: "error occurs in ConstructClusterWideKey", + binding: &workv1alpha2.ClusterResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "a/b/c", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + Failover: &policyv1alpha1.FailoverBehavior{ + Application: &policyv1alpha1.ApplicationFailoverBehavior{}, + }, + }, + Status: workv1alpha2.ResourceBindingStatus{ + AggregatedStatus: []workv1alpha2.AggregatedStatusItem{ + {ClusterName: "memberA", Status: generateRaw()}, + {ClusterName: "memberB", Status: generateRaw()}, + }, + }, + }, + expectRes: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := generateCRBApplicationFailoverController() + res := c.clusterResourceBindingFilter(tt.binding) + assert.Equal(t, tt.expectRes, res) + }) + } +} diff --git a/pkg/controllers/applicationfailover/rb_application_failover_controller_test.go b/pkg/controllers/applicationfailover/rb_application_failover_controller_test.go new file mode 100644 index 000000000000..dd291cf7353b --- /dev/null +++ b/pkg/controllers/applicationfailover/rb_application_failover_controller_test.go @@ -0,0 +1,358 @@ +package applicationfailover + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/tools/record" + 
controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + policyv1alpha1 "github.com/karmada-io/karmada/pkg/apis/policy/v1alpha1" + workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2" + "github.com/karmada-io/karmada/pkg/util/gclient" +) + +func generateRBApplicationFailoverController() *RBApplicationFailoverController { + m := newWorkloadUnhealthyMap() + c := &RBApplicationFailoverController{ + Client: fake.NewClientBuilder().WithScheme(gclient.NewSchema()).Build(), + EventRecorder: &record.FakeRecorder{}, + workloadUnhealthyMap: m, + } + return c +} + +func TestRBApplicationFailoverController_Reconcile(t *testing.T) { + t.Run("failed in bindingFilter", func(t *testing.T) { + binding := &workv1alpha2.ResourceBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: "binding", + Namespace: "default", + }, + Spec: workv1alpha2.ResourceBindingSpec{ + Resource: workv1alpha2.ObjectReference{ + APIVersion: "v1", + Kind: "Pod", + Namespace: "default", + Name: "pod", + }, + }, + } + c := generateRBApplicationFailoverController() + + // Prepare req + req := controllerruntime.Request{ + NamespacedName: types.NamespacedName{ + Name: "binding", + Namespace: "default", + }, + } + + if err := c.Client.Create(context.Background(), binding); err != nil { + t.Fatalf("Failed to create binding: %v", err) + } + + res, err := c.Reconcile(context.Background(), req) + assert.Equal(t, controllerruntime.Result{}, res) + assert.Equal(t, nil, err) + }) + + t.Run("failed in c.Client.Get", func(t *testing.T) { + c := generateRBApplicationFailoverController() + + // Prepare req + req := controllerruntime.Request{ + NamespacedName: types.NamespacedName{ + Name: "binding", + Namespace: "default", + }, + } + + res, err := c.Reconcile(context.Background(), req) + assert.Equal(t, controllerruntime.Result{}, res) + assert.Equal(t, nil, err) + }) +} + +func TestRBApplicationFailoverController_detectFailure(t *testing.T) { + cluster1 := "cluster1" + cluster2 := "cluster2" + key := types.NamespacedName{ + Namespace: "default", + Name: "test", + } + + t.Run("hasWorkloadBeenUnhealthy return false", func(t *testing.T) { + clusters := []string{cluster1, cluster2} + tolerationSeconds := int32(1) + + c := generateRBApplicationFailoverController() + duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key) + assert.Equal(t, tolerationSeconds, duration) + assert.Equal(t, []string(nil), needEvictClusters) + }) + + t.Run("more than the tolerance time", func(t *testing.T) { + clusters := []string{cluster1, cluster2} + tolerationSeconds := int32(1) + + c := generateRBApplicationFailoverController() + c.workloadUnhealthyMap.setTimeStamp(key, cluster1) + time.Sleep(2 * time.Second) + duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key) + assert.Equal(t, tolerationSeconds, duration) + assert.Equal(t, []string{"cluster1"}, needEvictClusters) + }) + + t.Run("less than the tolerance time", func(t *testing.T) { + clusters := []string{cluster1, cluster2} + tolerationSeconds := int32(100) + + c := generateRBApplicationFailoverController() + c.workloadUnhealthyMap.setTimeStamp(key, cluster1) + duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key) + assert.Equal(t, tolerationSeconds, duration) + assert.Equal(t, []string(nil), needEvictClusters) + }) + + t.Run("final duration is 0", func(t *testing.T) { + clusters := []string{} + tolerationSeconds := int32(100) + + c := generateRBApplicationFailoverController() + 
+        duration, needEvictClusters := c.detectFailure(clusters, &tolerationSeconds, key)
+        assert.Equal(t, int32(0), duration)
+        assert.Equal(t, []string(nil), needEvictClusters)
+    })
+}
+
+func TestRBApplicationFailoverController_syncBinding(t *testing.T) {
+    tolerationSeconds := int32(5)
+    c := generateRBApplicationFailoverController()
+    binding := &workv1alpha2.ResourceBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "binding",
+            Namespace: "default",
+        },
+        Spec: workv1alpha2.ResourceBindingSpec{
+            Resource: workv1alpha2.ObjectReference{
+                APIVersion: "v1",
+                Kind:       "Pod",
+                Namespace:  "default",
+                Name:       "pod",
+            },
+            Failover: &policyv1alpha1.FailoverBehavior{
+                Application: &policyv1alpha1.ApplicationFailoverBehavior{
+                    DecisionConditions: policyv1alpha1.DecisionConditions{
+                        TolerationSeconds: &tolerationSeconds,
+                    },
+                },
+            },
+            Clusters: []workv1alpha2.TargetCluster{
+                {
+                    Name:     "member1",
+                    Replicas: 1,
+                },
+                {
+                    Name:     "member1",
+                    Replicas: 2,
+                },
+            },
+        },
+        Status: workv1alpha2.ResourceBindingStatus{
+            AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
+                {
+                    ClusterName: "member1",
+                    Health:      workv1alpha2.ResourceHealthy,
+                },
+                {
+                    ClusterName: "member2",
+                    Health:      workv1alpha2.ResourceUnhealthy,
+                },
+            },
+        },
+    }
+
+    dur, err := c.syncBinding(binding)
+    assert.Equal(t, 5*time.Second, dur)
+    assert.NoError(t, err)
+}
+
+func TestRBApplicationFailoverController_evictBinding(t *testing.T) {
+    tests := []struct {
+        name        string
+        purgeMode   policyv1alpha1.PurgeMode
+        expectError bool
+    }{
+        {
+            name:        "PurgeMode is Graciously",
+            purgeMode:   policyv1alpha1.Graciously,
+            expectError: false,
+        },
+        {
+            name:        "PurgeMode is Never",
+            purgeMode:   policyv1alpha1.Never,
+            expectError: false,
+        },
+        {
+            name:        "PurgeMode is Immediately",
+            purgeMode:   policyv1alpha1.Immediately,
+            expectError: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            c := generateRBApplicationFailoverController()
+            binding := &workv1alpha2.ResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "v1",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                    Failover: &policyv1alpha1.FailoverBehavior{
+                        Application: &policyv1alpha1.ApplicationFailoverBehavior{
+                            PurgeMode: tt.purgeMode,
+                        },
+                    },
+                },
+            }
+            clusters := []string{"member1", "member2"}
+            err := c.evictBinding(binding, clusters)
+            if tt.expectError {
+                assert.Error(t, err)
+            } else {
+                assert.NoError(t, err)
+            }
+        })
+    }
+}
+
+func TestRBApplicationFailoverController_updateBinding(t *testing.T) {
+    binding := &workv1alpha2.ResourceBinding{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      "binding",
+            Namespace: "default",
+        },
+        Spec: workv1alpha2.ResourceBindingSpec{
+            Resource: workv1alpha2.ObjectReference{
+                APIVersion: "v1",
+                Kind:       "Pod",
+                Namespace:  "default",
+                Name:       "pod",
+            },
+        },
+    }
+    allClusters := sets.New("member1", "member2", "member3")
+    needEvictClusters := []string{"member1", "member2"}
+
+    c := generateRBApplicationFailoverController()
+
+    t.Run("failed when c.Update", func(t *testing.T) {
+        err := c.updateBinding(binding, allClusters, needEvictClusters)
+        assert.Error(t, err)
+    })
+
+    t.Run("normal case", func(t *testing.T) {
+        if err := c.Client.Create(context.Background(), binding); err != nil {
+            t.Fatalf("Failed to create binding: %v", err)
+        }
+        err := c.updateBinding(binding, allClusters, needEvictClusters)
+        assert.NoError(t, err)
+    })
+}
+
+func TestRBApplicationFailoverController_bindingFilter(t *testing.T) {
+    tests := []struct {
+        name      string
+        binding   *workv1alpha2.ResourceBinding
+        expectRes bool
+    }{
+        {
+            name: "rb.Spec.Failover and rb.Spec.Failover.Application is nil",
+            binding: &workv1alpha2.ResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "v1",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                },
+            },
+            expectRes: false,
+        },
+        {
+            name: "rb.Status.AggregatedStatus is 0",
+            binding: &workv1alpha2.ResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "v1",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                    Failover: &policyv1alpha1.FailoverBehavior{
+                        Application: &policyv1alpha1.ApplicationFailoverBehavior{},
+                    },
+                },
+            },
+            expectRes: false,
+        },
+        {
+            name: "error occurs in ConstructClusterWideKey",
+            binding: &workv1alpha2.ResourceBinding{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      "binding",
+                    Namespace: "default",
+                },
+                Spec: workv1alpha2.ResourceBindingSpec{
+                    Resource: workv1alpha2.ObjectReference{
+                        APIVersion: "a/b/c",
+                        Kind:       "Pod",
+                        Namespace:  "default",
+                        Name:       "pod",
+                    },
+                    Failover: &policyv1alpha1.FailoverBehavior{
+                        Application: &policyv1alpha1.ApplicationFailoverBehavior{},
+                    },
+                },
+                Status: workv1alpha2.ResourceBindingStatus{
+                    AggregatedStatus: []workv1alpha2.AggregatedStatusItem{
+                        {ClusterName: "memberA", Status: generateRaw()},
+                        {ClusterName: "memberB", Status: generateRaw()},
+                    },
+                },
+            },
+            expectRes: false,
+        },
+    }
+
+    for _, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            c := generateRBApplicationFailoverController()
+            res := c.bindingFilter(tt.binding)
+            assert.Equal(t, tt.expectRes, res)
+        })
+    }
+}