diff --git a/pkg/common/testing/assertion.go b/pkg/common/testing/assertion.go
index 34ec66615..6aca9f9f5 100644
--- a/pkg/common/testing/assertion.go
+++ b/pkg/common/testing/assertion.go
@@ -51,7 +51,7 @@ func AssertErrorWithPrefix(t *testing.T, actual error, expectedErrorPrefix strin
 func AssertActions(t *testing.T, actualActions []clienttesting.Action, expectedVerbs ...string) {
 	t.Helper()
 	if len(actualActions) != len(expectedVerbs) {
-		t.Fatalf("expected %d call but got: %#v", len(expectedVerbs), actualActions)
+		t.Fatalf("expected %d calls but got %d: %#v", len(expectedVerbs), len(actualActions), actualActions)
 	}
 	for i, expected := range expectedVerbs {
 		if actualActions[i].GetVerb() != expected {
diff --git a/pkg/work/helper/helpers.go b/pkg/work/helper/helpers.go
index 49f21b704..aab7b78d9 100644
--- a/pkg/work/helper/helpers.go
+++ b/pkg/work/helper/helpers.go
@@ -19,12 +19,16 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/klog/v2"

+	clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
+	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 )

@@ -467,3 +471,25 @@ func BuildResourceMeta(
 	resourceMeta.Resource = mapping.Resource.Resource
 	return resourceMeta, mapping.Resource, err
 }
+
+type PlacementDecisionGetter struct {
+	Client clusterlister.PlacementDecisionLister
+}
+
+func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace string) ([]*clusterv1beta1.PlacementDecision, error) {
+	return pdl.Client.PlacementDecisions(namespace).List(selector)
+}
+
+// GetClusters returns the added and deleted cluster names compared to the given existing clusters.
+func GetClusters(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement,
+	existingClusters sets.Set[string]) (sets.Set[string], sets.Set[string], error) {
+	pdtracker := GetPlacementTracker(client, placement, existingClusters)
+
+	return pdtracker.GetClusterChanges()
+}
+
+func GetPlacementTracker(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement,
+	existingClusters sets.Set[string]) *clusterv1beta1.PlacementDecisionClustersTracker {
+
+	return clusterv1beta1.NewPlacementDecisionClustersTracker(placement, PlacementDecisionGetter{Client: client}, existingClusters)
+}
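The new helpers wrap the cluster API's PlacementDecisionClustersTracker. A minimal sketch of how a consumer could use GetClusters to apply the placement-decision delta (the lister and placement are assumed to come from shared informers; syncClusters is a hypothetical caller, not part of this PR):

```go
package example

import (
	"k8s.io/apimachinery/pkg/util/sets"

	clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"

	"open-cluster-management.io/ocm/pkg/work/helper"
)

// syncClusters updates the caller's view of the selected clusters:
// newly decided clusters are added, removed ones are dropped.
func syncClusters(lister clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement,
	current sets.Set[string]) (sets.Set[string], error) {
	added, deleted, err := helper.GetClusters(lister, placement, current)
	if err != nil {
		return nil, err
	}
	return current.Union(added).Difference(deleted), nil
}
```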
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go
index 65f5220ee..aab4e5073 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go
@@ -37,6 +37,10 @@ const (
 	// TODO move this to the api repo
 	ManifestWorkReplicaSetControllerNameLabelKey = "work.open-cluster-management.io/manifestworkreplicaset"

+	// ManifestWorkReplicaSetPlacementNameLabelKey is the label key on a ManifestWork referencing the Placement
+	// that selected the managed cluster through the ManifestWorkReplicaSet's PlacementRefs.
+	ManifestWorkReplicaSetPlacementNameLabelKey = "work.open-cluster-management.io/placementname"
+
 	// ManifestWorkReplicaSetFinalizer is the name of the finalizer added to ManifestWorkReplicaSet. It is used to ensure
 	// related manifestworks is deleted
 	ManifestWorkReplicaSetFinalizer = "work.open-cluster-management.io/manifest-work-cleanup"
@@ -124,7 +128,7 @@ func newController(workClient workclientset.Interface,
 	}
 }

-// sync is the main reconcile loop for placeManifest work. It is triggered every 15sec
+// sync is the main reconcile loop for ManifestWorkReplicaSet. It is triggered every 15 seconds
 func (m *ManifestWorkReplicaSetController) sync(ctx context.Context, controllerContext factory.SyncContext) error {
 	key := controllerContext.QueueKey()
 	klog.V(4).Infof("Reconciling ManifestWorkReplicaSet %q", key)
@@ -180,3 +184,19 @@ func listManifestWorksByManifestWorkReplicaSet(mwrs *workapiv1alpha1.ManifestWor
 	selector := labels.NewSelector().Add(*req)
 	return manifestWorkLister.List(selector)
 }
+
+func listManifestWorksByMWRSetPlacementRef(mwrs *workapiv1alpha1.ManifestWorkReplicaSet, placementName string,
+	manifestWorkLister worklisterv1.ManifestWorkLister) ([]*workapiv1.ManifestWork, error) {
+	reqMWRSet, err := labels.NewRequirement(ManifestWorkReplicaSetControllerNameLabelKey, selection.Equals, []string{manifestWorkReplicaSetKey(mwrs)})
+	if err != nil {
+		return nil, err
+	}
+
+	reqPlacementRef, err := labels.NewRequirement(ManifestWorkReplicaSetPlacementNameLabelKey, selection.Equals, []string{placementName})
+	if err != nil {
+		return nil, err
+	}
+
+	selector := labels.NewSelector().Add(*reqMWRSet, *reqPlacementRef)
+	return manifestWorkLister.List(selector)
+}
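Every ManifestWork stamped by the controller now carries both labels, so the lister query above ANDs two equality requirements. A small, self-contained sketch of how that selector behaves (the label values are made up for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	reqSet, _ := labels.NewRequirement(
		"work.open-cluster-management.io/manifestworkreplicaset", selection.Equals, []string{"default.mwrset-test"})
	reqPlacement, _ := labels.NewRequirement(
		"work.open-cluster-management.io/placementname", selection.Equals, []string{"placement"})
	selector := labels.NewSelector().Add(*reqSet, *reqPlacement)

	// Matches only when both labels are present with the expected values.
	fmt.Println(selector.Matches(labels.Set{
		"work.open-cluster-management.io/manifestworkreplicaset": "default.mwrset-test",
		"work.open-cluster-management.io/placementname":          "placement",
	})) // true
	fmt.Println(selector.Matches(labels.Set{
		"work.open-cluster-management.io/manifestworkreplicaset": "default.mwrset-test",
	})) // false: the placement label is missing
}
```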
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go
index 659841a53..a5eec6c1e 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controllers_test.go
@@ -150,7 +150,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) {
 				w.Finalizers = []string{ManifestWorkReplicaSetFinalizer}
 				return w
 			}(),
-			works: helpertest.CreateTestManifestWorks("test", "default", "cluster1", "cluster2"),
+			works: helpertest.CreateTestManifestWorks("test", "default", "placement", "cluster1", "cluster2"),
 			placement: func() *clusterv1beta1.Placement {
 				p, _ := helpertest.CreateTestPlacement("placement", "default", "cluster1", "cluster2")
 				return p
@@ -181,7 +181,7 @@ func TestManifestWorkReplicaSetControllerPatchStatus(t *testing.T) {
 				w.Finalizers = []string{ManifestWorkReplicaSetFinalizer}
 				return w
 			}(),
-			works: helpertest.CreateTestManifestWorks("test", "default", "cluster1", "cluster2"),
+			works: helpertest.CreateTestManifestWorks("test", "default", "placement", "cluster1", "cluster2"),
 			placement: func() *clusterv1beta1.Placement {
 				p, _ := helpertest.CreateTestPlacement("placement", "default", "cluster2", "cluster3", "cluster4")
 				return p
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go
index 184c747b6..097ccb82d 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go
@@ -12,12 +12,12 @@ import (
 	clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1"
 	worklisterv1 "open-cluster-management.io/api/client/work/listers/work/v1"
-	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
+	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
 	"open-cluster-management.io/api/utils/work/v1/workapplier"
 	workv1 "open-cluster-management.io/api/work/v1"
 	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"

-	"open-cluster-management.io/ocm/pkg/common/helpers"
+	"open-cluster-management.io/ocm/pkg/work/helper"
 )

 // deployReconciler is to manage ManifestWork based on the placement.
@@ -31,90 +31,126 @@ type deployReconciler struct {

 func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha1.ManifestWorkReplicaSet,
 ) (*workapiv1alpha1.ManifestWorkReplicaSet, reconcileState, error) {
 	// Manifestwork create/update/delete logic.
-	var placements []*clusterv1beta1.Placement
+	var errs []error
+	var plcsSummary []workapiv1alpha1.PlacementSummary
+	count, total := 0, 0
+	// Get each placement and the ManifestWorks created for it
 	for _, placementRef := range mwrSet.Spec.PlacementRefs {
+		var existingRolloutClsStatus []clusterv1alpha1.ClusterRolloutStatus
+		existingClusterNames := sets.New[string]()
 		placement, err := d.placementLister.Placements(mwrSet.Namespace).Get(placementRef.Name)
+
 		if errors.IsNotFound(err) {
 			apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonPlacementDecisionNotFound, ""))
 			return mwrSet, reconcileStop, nil
 		}
+
 		if err != nil {
 			return mwrSet, reconcileContinue, fmt.Errorf("failed get placement %w", err)
 		}
-		placements = append(placements, placement)
-	}

-	manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(mwrSet, d.manifestWorkLister)
-	if err != nil {
-		return mwrSet, reconcileContinue, err
-	}
+		manifestWorks, err := listManifestWorksByMWRSetPlacementRef(mwrSet, placementRef.Name, d.manifestWorkLister)
+		if err != nil {
+			return mwrSet, reconcileContinue, err
+		}

-	var errs []error
-	addedClusters, deletedClusters, existingClusters := sets.New[string](), sets.New[string](), sets.New[string]()
-	for _, mw := range manifestWorks {
-		existingClusters.Insert(mw.Namespace)
-	}
+		for _, mw := range manifestWorks {
+			// If the ManifestWorkTemplate changed, the ManifestWork needs to be re-applied; skip it here so
+			// the rollout treats its cluster as not yet applied.
+			newMW := &workv1.ManifestWork{}
+			mw.ObjectMeta.DeepCopyInto(&newMW.ObjectMeta)
+			mwrSet.Spec.ManifestWorkTemplate.DeepCopyInto(&newMW.Spec)
+
+			// TODO: add a NeedToApply function to workApplier that checks the manifestWork spec hash value from the cache.
+			if !workapplier.ManifestWorkEqual(newMW, mw) {
+				continue
+			}
+
+			existingClusterNames.Insert(mw.Namespace)
+			rolloutClusterStatus, err := d.clusterRolloutStatusFunc(mw.Namespace, *mw)

-	for _, placement := range placements {
-		added, deleted, err := helpers.GetClusterChanges(d.placeDecisionLister, placement, existingClusters)
+			if err != nil {
+				errs = append(errs, err)
+				continue
+			}
+			existingRolloutClsStatus = append(existingRolloutClsStatus, rolloutClusterStatus)
+		}
+
+		placeTracker := helper.GetPlacementTracker(d.placeDecisionLister, placement, existingClusterNames)
+		rolloutHandler, err := clusterv1alpha1.NewRolloutHandler(placeTracker, d.clusterRolloutStatusFunc)
 		if err != nil {
 			apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
 			return mwrSet, reconcileContinue, utilerrors.NewAggregate(errs)
 		}

-		addedClusters = addedClusters.Union(added)
-		deletedClusters = deletedClusters.Union(deleted)
-	}
-
-	// Create manifestWork for added clusters
-	for cls := range addedClusters {
-		mw, err := CreateManifestWork(mwrSet, cls)
+		err = placeTracker.Refresh()
 		if err != nil {
 			errs = append(errs, err)
 			continue
 		}

-		_, err = d.workApplier.Apply(ctx, mw)
+		_, rolloutResult, err := rolloutHandler.GetRolloutCluster(placementRef.RolloutStrategy, existingRolloutClsStatus)
+
 		if err != nil {
 			errs = append(errs, err)
+			apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonNotAsExpected, ""))
+
+			continue
 		}
-	}

-	// Update manifestWorks in case there are changes at ManifestWork or ManifestWorkReplicaSet
-	for cls := range existingClusters {
-		// Delete manifestWork for deleted clusters
-		if deletedClusters.Has(cls) {
-			err = d.workApplier.Delete(ctx, cls, mwrSet.Name)
+		// Create ManifestWorks
+		for _, rolloutStatus := range rolloutResult.ClustersToRollout {
+			if rolloutStatus.Status == clusterv1alpha1.ToApply {
+				mw, err := CreateManifestWork(mwrSet, rolloutStatus.ClusterName, placementRef.Name)
+				if err != nil {
+					errs = append(errs, err)
+					continue
+				}
+
+				_, err = d.workApplier.Apply(ctx, mw)
+				if err != nil {
+					errs = append(errs, err)
+				}
+				if !existingClusterNames.Has(rolloutStatus.ClusterName) {
+					existingClusterNames.Insert(rolloutStatus.ClusterName)
+				}
+			}
+		}
+
+		for _, cls := range rolloutResult.ClustersRemoved {
+			// Delete manifestWork for removed clusters
+			err = d.workApplier.Delete(ctx, cls.ClusterName, mwrSet.Name)
 			if err != nil {
 				errs = append(errs, err)
+				continue
 			}
-			continue
+			existingClusterNames.Delete(cls.ClusterName)
 		}

-		mw, err := CreateManifestWork(mwrSet, cls)
-		if err != nil {
-			errs = append(errs, err)
-			continue
+		total = total + int(placement.Status.NumberOfSelectedClusters)
+		plcSummary := workapiv1alpha1.PlacementSummary{
+			Name: placementRef.Name,
+			AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement.Status.DecisionGroups),
+				len(existingClusterNames), placement.Status.NumberOfSelectedClusters),
 		}
-
-		_, err = d.workApplier.Apply(ctx, mw)
-		if err != nil {
-			errs = append(errs, err)
+		mwrSetSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total: len(existingClusterNames),
 		}
+		plcSummary.Summary = mwrSetSummary
+		plcsSummary = append(plcsSummary, plcSummary)
+
+		count = count + len(existingClusterNames)
 	}

+	// Set the placements summary
+	mwrSet.Status.PlacementsSummary = plcsSummary
 	// Set the Summary
 	if mwrSet.Status.Summary == (workapiv1alpha1.ManifestWorkReplicaSetSummary{}) {
 		mwrSet.Status.Summary = workapiv1alpha1.ManifestWorkReplicaSetSummary{}
 	}
-	total := len(existingClusters) - len(deletedClusters) + len(addedClusters)
-	if total < 0 {
-		total = 0
-	}
-	mwrSet.Status.Summary.Total = total
-	if total == 0 {
+	mwrSet.Status.Summary.Total = count
+	if count == 0 {
 		mwrSet.Status.Summary.Applied = 0
 		mwrSet.Status.Summary.Available = 0
 		mwrSet.Status.Summary.Degraded = 0
@@ -124,9 +160,54 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
 		apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementDecisionVerified(workapiv1alpha1.ReasonAsExpected, ""))
 	}

+	if total == count {
+		apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementRollOut(workapiv1alpha1.ReasonComplete, ""))
+	} else {
+		apimeta.SetStatusCondition(&mwrSet.Status.Conditions, GetPlacementRollOut(workapiv1alpha1.ReasonProgressing, ""))
+	}
+
 	return mwrSet, reconcileContinue, utilerrors.NewAggregate(errs)
 }

+func (d *deployReconciler) clusterRolloutStatusFunc(clusterName string, manifestWork workv1.ManifestWork) (clusterv1alpha1.ClusterRolloutStatus, error) {
+	clsRolloutStatus := clusterv1alpha1.ClusterRolloutStatus{
+		ClusterName:        clusterName,
+		LastTransitionTime: &manifestWork.CreationTimestamp,
+		// Default status is ToApply
+		Status: clusterv1alpha1.ToApply,
+	}
+
+	appliedCondition := apimeta.FindStatusCondition(manifestWork.Status.Conditions, workv1.WorkApplied)
+
+	// If the Applied condition does not exist, return the status as ToApply.
+	if appliedCondition == nil {
+		return clsRolloutStatus, nil
+	} else if appliedCondition.Status == metav1.ConditionTrue ||
+		apimeta.IsStatusConditionTrue(manifestWork.Status.Conditions, workv1.WorkProgressing) {
+		// If the Applied or the Progressing condition is true, set the status to Progressing.
+		// A ManifestWork Progressing condition is not defined yet; the check is included for future use.
+		clsRolloutStatus.Status = clusterv1alpha1.Progressing
+	} else if appliedCondition.Status == metav1.ConditionFalse {
+		// If the Applied condition is false, return the status as Failed.
+		clsRolloutStatus.Status = clusterv1alpha1.Failed
+		return clsRolloutStatus, nil
+	}
+
+	// If the Available condition is true, return the status as Succeeded.
+	if apimeta.IsStatusConditionTrue(manifestWork.Status.Conditions, workv1.WorkAvailable) {
+		clsRolloutStatus.Status = clusterv1alpha1.Succeeded
+		return clsRolloutStatus, nil
+	}

+	// If the Degraded condition is true, return the status as Failed.
+	// A ManifestWork Degraded condition is not defined yet; the check is included for future use.
+	if apimeta.IsStatusConditionTrue(manifestWork.Status.Conditions, workv1.WorkDegraded) {
+		clsRolloutStatus.Status = clusterv1alpha1.Failed
+	}
+
+	return clsRolloutStatus, nil
+}
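The condition-to-rollout-status mapping above is easiest to see in a table-style check. A sketch of a unit test for it, assuming it lives in the same package as deployReconciler (the test values are illustrative, not from this PR):

```go
package manifestworkreplicasetcontroller

import (
	"testing"

	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
	workv1 "open-cluster-management.io/api/work/v1"
)

func TestClusterRolloutStatusMapping(t *testing.T) {
	d := &deployReconciler{}

	// No Applied condition at all: the work has never been applied.
	mw := workv1.ManifestWork{}
	status, err := d.clusterRolloutStatusFunc("cls1", mw)
	if err != nil {
		t.Fatal(err)
	}
	if status.Status != clusterv1alpha1.ToApply {
		t.Fatalf("expected ToApply, got %v", status.Status)
	}

	// Applied=True plus Available=True: the work is fully rolled out.
	apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
		Type: workv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "Applied"})
	apimeta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{
		Type: workv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "Available"})
	if status, _ = d.clusterRolloutStatusFunc("cls1", mw); status.Status != clusterv1alpha1.Succeeded {
		t.Fatalf("expected Succeeded, got %v", status.Status)
	}
}
```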
+
 // GetManifestworkApplied return only True status if there all clusters have manifests applied as expected
 func GetManifestworkApplied(reason string, message string) metav1.Condition {
 	if reason == workapiv1alpha1.ReasonAsExpected {
@@ -146,6 +227,15 @@ func GetPlacementDecisionVerified(reason string, message string) metav1.Conditio
 	return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified, reason, message, metav1.ConditionFalse)
 }

+// GetPlacementRollOut returns a True condition only when the rollout is complete for all selected clusters
+func GetPlacementRollOut(reason string, message string) metav1.Condition {
+	if reason == workapiv1alpha1.ReasonComplete {
+		return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, reason, message, metav1.ConditionTrue)
+	}
+
+	return getCondition(workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut, reason, message, metav1.ConditionFalse)
+}
+
 func getCondition(conditionType string, reason string, message string, status metav1.ConditionStatus) metav1.Condition {
 	return metav1.Condition{
 		Type:    conditionType,
@@ -156,7 +246,7 @@ func getCondition(conditionType string, reason string, message string, status me
 	}
 }

-func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterNS string) (*workv1.ManifestWork, error) {
+func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterNS string, placementRefName string) (*workv1.ManifestWork, error) {
 	if clusterNS == "" {
 		return nil, fmt.Errorf("invalid cluster namespace")
 	}
@@ -165,7 +255,12 @@ func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterN
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      mwrSet.Name,
 			Namespace: clusterNS,
-			Labels:    map[string]string{ManifestWorkReplicaSetControllerNameLabelKey: manifestWorkReplicaSetKey(mwrSet)},
+			Labels: map[string]string{ManifestWorkReplicaSetControllerNameLabelKey: manifestWorkReplicaSetKey(mwrSet),
+				ManifestWorkReplicaSetPlacementNameLabelKey: placementRefName},
 		},
 		Spec: mwrSet.Spec.ManifestWorkTemplate}, nil
 }
+
+func getAvailableDecisionGroupProgressMessage(groupNum int, existingClsCount int, totalCls int32) string {
+	return fmt.Sprintf("%d (%d / %d clusters applied)", groupNum, existingClsCount, totalCls)
+}
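For reference, the progress message renders as shown in this small sketch (assumed to run in the same package, with fmt imported; the values are illustrative):

```go
func ExampleProgressMessage() {
	// 2 decision groups; 3 of the 5 selected clusters currently have an applied ManifestWork.
	fmt.Println(getAvailableDecisionGroupProgressMessage(2, 3, 5))
	// Output: 2 (3 / 5 clusters applied)
}
```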
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go
index 45cd37552..c4c4771e8 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -12,6 +13,7 @@ import (
 	clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions"
 	fakeworkclient "open-cluster-management.io/api/client/work/clientset/versioned/fake"
 	workinformers "open-cluster-management.io/api/client/work/informers/externalversions"
+	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
 	"open-cluster-management.io/api/utils/work/v1/workapplier"
 	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"

@@ -20,7 +22,7 @@ import (

 func TestDeployReconcileAsExpected(t *testing.T) {
 	mwrSet := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
-	mw, _ := CreateManifestWork(mwrSet, "cls1")
+	mw, _ := CreateManifestWork(mwrSet, "cls1", "place-test")

 	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mw)
 	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -200,3 +202,382 @@ func TestDeployReconcileAsPlacementNotExist(t *testing.T) {
 		t.Fatal("Placement condition Reason not match PlacementDecisionEmpty ", placeCondition)
 	}
 }
+
+func TestDeployWithRolloutStrategyReconcileAsExpected(t *testing.T) {
+	// create placement with 5 clusters and 2 groups
+	clusters := []string{"cls1", "cls2", "cls3", "cls4", "cls5"}
+	clsPerGroup := 3
+	placement, placementDecisions := helpertest.CreateTestPlacementWithDecisionStrategy("place-test", "default", clsPerGroup, clusters...)
+	fClusterClient := fakeclusterclient.NewSimpleClientset(placement, placementDecisions[0])
+	clusterInformerFactory := clusterinformers.NewSharedInformerFactoryWithOptions(fClusterClient, 1*time.Second)
+	err := clusterInformerFactory.Cluster().V1beta1().Placements().Informer().GetStore().Add(placement)
+	assert.Nil(t, err)
+
+	for _, plcDecision := range placementDecisions {
+		err = clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(plcDecision)
+		assert.Nil(t, err)
+	}
+	placementLister := clusterInformerFactory.Cluster().V1beta1().Placements().Lister()
+	placementDecisionLister := clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Lister()
+	perGroupRollOut := clusterv1alpha1.RolloutStrategy{
+		Type: clusterv1alpha1.ProgressivePerGroup,
+		ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{
+			Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+		},
+	}
+	mwrSet := helpertest.CreateTestManifestWorkReplicaSetWithRollOutStrategy("mwrSet-test", "default",
+		map[string]clusterv1alpha1.RolloutStrategy{placement.Name: perGroupRollOut})
+	mw := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement.Name, "cls1")
+	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mw)
+	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
+
+	err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+	assert.Nil(t, err)
+
+	err = workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(mwrSet)
+	assert.Nil(t, err)
+
+	mwLister := workInformerFactory.Work().V1().ManifestWorks().Lister()
+	pmwDeployController := deployReconciler{
+		workApplier:         workapplier.NewWorkApplierWithTypedClient(fWorkClient, mwLister),
+		manifestWorkLister:  mwLister,
+		placeDecisionLister: placementDecisionLister,
+		placementLister:     placementLister,
+	}
+
+	mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
+	assert.Nil(t, err)
+	assert.Equal(t, len(mwrSet.Status.PlacementsSummary), len(mwrSet.Spec.PlacementRefs))
+
+	expectPlcSummary := workapiv1alpha1.PlacementSummary{
+		Name: placement.Name,
+		AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement.Status.DecisionGroups),
+			clsPerGroup, placement.Status.NumberOfSelectedClusters),
+		Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total:       clsPerGroup,
+			Applied:     0,
+			Available:   0,
+			Degraded:    0,
+			Progressing: 0,
+		},
+	}
+	actualPlcSummary := mwrSet.Status.PlacementsSummary[0]
+	assert.Equal(t, actualPlcSummary, expectPlcSummary)
+
+	// Check for the expected manifestWorkReplicaSetSummary
+	mwrSetSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       clsPerGroup,
+		Applied:     0,
+		Available:   0,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSet.Status.Summary, mwrSetSummary)
+
+	// Check the PlacedManifestWork conditions
+	placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
+	assert.NotNil(t, placeCondition)
+	assert.Equal(t, placeCondition.Status, metav1.ConditionTrue)
+	assert.Equal(t, placeCondition.Reason, workapiv1alpha1.ReasonAsExpected)
+
+	// Check the RollOut conditions
+	rollOutCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
+	assert.NotNil(t, rollOutCondition)
+	assert.Equal(t, rollOutCondition.Status, metav1.ConditionFalse)
+	assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonProgressing)
+
+	// Re-run deploy so the manifestWorks for all clusters are created.
+	mw = helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, "place-test", "cls2")
+	err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+	assert.Nil(t, err)
+
+	mw = helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, "place-test", "cls3")
+	err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+	assert.Nil(t, err)
+
+	mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
+	assert.Nil(t, err)
+	assert.Equal(t, len(mwrSet.Status.PlacementsSummary), len(mwrSet.Spec.PlacementRefs))
+
+	expectPlcSummary = workapiv1alpha1.PlacementSummary{
+		Name: placement.Name,
+		AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement.Status.DecisionGroups),
+			int(placement.Status.NumberOfSelectedClusters), placement.Status.NumberOfSelectedClusters),
+		Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total:       int(placement.Status.NumberOfSelectedClusters),
+			Applied:     0,
+			Available:   0,
+			Degraded:    0,
+			Progressing: 0,
+		},
+	}
+	actualPlcSummary = mwrSet.Status.PlacementsSummary[0]
+	assert.Equal(t, expectPlcSummary, actualPlcSummary)
+
+	// Check for the expected manifestWorkReplicaSetSummary
+	mwrSetSummary = workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       len(clusters),
+		Applied:     0,
+		Available:   0,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSet.Status.Summary, mwrSetSummary)
+
+	// Check the RollOut conditions
+	rollOutCondition = apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
+	assert.NotNil(t, rollOutCondition)
+	assert.Equal(t, rollOutCondition.Status, metav1.ConditionTrue)
+	assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonComplete)
+}
+
+func TestDeployWithMultiPlacementsReconcileAsExpected(t *testing.T) {
+	placement1, placementDecision1 := helpertest.CreateTestPlacement("place-test1", "default", "cls6", "cls7")
+	// create placement with 5 clusters and 2 groups
+	clusters := []string{"cls1", "cls2", "cls3", "cls4", "cls5"}
+	clsPerGroup := 3
+	placement2, placementDecisions2 := helpertest.CreateTestPlacementWithDecisionStrategy("place-test2", "default", clsPerGroup, clusters...)
+	fClusterClient := fakeclusterclient.NewSimpleClientset(placement1, placementDecision1)
+	clusterInformerFactory := clusterinformers.NewSharedInformerFactoryWithOptions(fClusterClient, 1*time.Second)
+	err := clusterInformerFactory.Cluster().V1beta1().Placements().Informer().GetStore().Add(placement1)
+	assert.Nil(t, err)
+
+	err = clusterInformerFactory.Cluster().V1beta1().Placements().Informer().GetStore().Add(placement2)
+	assert.Nil(t, err)
+
+	err = clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(placementDecision1)
+	assert.Nil(t, err)
+
+	for _, plcDecision := range placementDecisions2 {
+		err = clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(plcDecision)
+		assert.Nil(t, err)
+	}
+
+	placementLister := clusterInformerFactory.Cluster().V1beta1().Placements().Lister()
+	placementDecisionLister := clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Lister()
+	perGroupRollOut := clusterv1alpha1.RolloutStrategy{
+		Type: clusterv1alpha1.ProgressivePerGroup,
+		ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{
+			Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+		},
+	}
+	allRollOut := clusterv1alpha1.RolloutStrategy{
+		Type: clusterv1alpha1.All,
+		All: &clusterv1alpha1.RolloutAll{
+			Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+		},
+	}
+
+	mwrSet := helpertest.CreateTestManifestWorkReplicaSetWithRollOutStrategy("mwrSet-test", "default",
+		map[string]clusterv1alpha1.RolloutStrategy{placement1.Name: allRollOut, placement2.Name: perGroupRollOut})
+
+	mw := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement2.Name, "cls1")
+	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet, mw)
+	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
+
+	err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+	assert.Nil(t, err)
+
+	err = workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(mwrSet)
+	assert.Nil(t, err)
+
+	mwLister := workInformerFactory.Work().V1().ManifestWorks().Lister()
+	pmwDeployController := deployReconciler{
+		workApplier:         workapplier.NewWorkApplierWithTypedClient(fWorkClient, mwLister),
+		manifestWorkLister:  mwLister,
+		placeDecisionLister: placementDecisionLister,
+		placementLister:     placementLister,
+	}
+
+	mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
+	assert.Nil(t, err)
+	assert.Equal(t, len(mwrSet.Status.PlacementsSummary), len(mwrSet.Spec.PlacementRefs))
+
+	// Check placements summary
+	expectPlcSummary1 := workapiv1alpha1.PlacementSummary{
+		Name: placement1.Name,
+		AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement1.Status.DecisionGroups),
+			int(placement1.Status.NumberOfSelectedClusters), placement1.Status.NumberOfSelectedClusters),
+		Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total:       int(placement1.Status.NumberOfSelectedClusters),
+			Applied:     0,
+			Available:   0,
+			Degraded:    0,
+			Progressing: 0,
+		},
+	}
+	expectPlcSummary2 := workapiv1alpha1.PlacementSummary{
+		Name: placement2.Name,
+		AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement2.Status.DecisionGroups),
+			clsPerGroup, placement2.Status.NumberOfSelectedClusters),
+		Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total:       clsPerGroup,
+			Applied:     0,
+			Available:   0,
+			Degraded:    0,
+			Progressing: 0,
+		},
+	}
+	for _, plcSummary := range mwrSet.Status.PlacementsSummary {
+		if plcSummary.Name == placement1.Name {
+			assert.Equal(t, plcSummary, expectPlcSummary1)
+		} else if plcSummary.Name == placement2.Name {
+			assert.Equal(t, plcSummary, expectPlcSummary2)
+		} else {
+			t.Fatal("unexpected PlacementSummary name ", plcSummary.Name)
+		}
+	}
+
+	// Check for the expected manifestWorkReplicaSetSummary
+	mwrSetSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       clsPerGroup + int(placement1.Status.NumberOfSelectedClusters),
+		Applied:     0,
+		Available:   0,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSet.Status.Summary, mwrSetSummary)
+
+	// Check the PlacedManifestWork conditions
+	placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
+	assert.NotNil(t, placeCondition)
+	// Check placement condition status true
+	assert.Equal(t, placeCondition.Status, metav1.ConditionTrue)
+	// Check placement condition reason
+	assert.Equal(t, placeCondition.Reason, workapiv1alpha1.ReasonAsExpected)
+
+	// Check the RollOut conditions
+	rollOutCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
+	assert.NotNil(t, rollOutCondition)
+	// Check placement condition status False
+	assert.Equal(t, rollOutCondition.Status, metav1.ConditionFalse)
+	// Check placement condition reason
+	assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonProgressing)
+}
+
+func TestDeployMWRSetSpecChangesReconcile(t *testing.T) {
+	// create placement with 5 clusters and 2 groups
+	clusters := []string{"cls1", "cls2", "cls3", "cls4", "cls5"}
+	clsPerGroup := 3
+	placement, placementDecisions := helpertest.CreateTestPlacementWithDecisionStrategy("place-test", "default", clsPerGroup, clusters...)
+	fClusterClient := fakeclusterclient.NewSimpleClientset(placement, placementDecisions[0])
+	clusterInformerFactory := clusterinformers.NewSharedInformerFactoryWithOptions(fClusterClient, 1*time.Second)
+	err := clusterInformerFactory.Cluster().V1beta1().Placements().Informer().GetStore().Add(placement)
+	assert.Nil(t, err)
+
+	for _, plcDecision := range placementDecisions {
+		err = clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Informer().GetStore().Add(plcDecision)
+		assert.Nil(t, err)
+	}
+	placementLister := clusterInformerFactory.Cluster().V1beta1().Placements().Lister()
+	placementDecisionLister := clusterInformerFactory.Cluster().V1beta1().PlacementDecisions().Lister()
+	perGroupRollOut := clusterv1alpha1.RolloutStrategy{
+		Type: clusterv1alpha1.ProgressivePerGroup,
+		ProgressivePerGroup: &clusterv1alpha1.RolloutProgressivePerGroup{
+			Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+		},
+	}
+	mwrSet := helpertest.CreateTestManifestWorkReplicaSetWithRollOutStrategy("mwrSet-test", "default",
+		map[string]clusterv1alpha1.RolloutStrategy{placement.Name: perGroupRollOut})
+
+	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSet)
+	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
+	// create manifestWorks
+	for i := 0; i < clsPerGroup; i++ {
+		mw := helpertest.CreateTestManifestWork(mwrSet.Name, mwrSet.Namespace, placement.Name, clusters[i])
+		err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+		assert.Nil(t, err)
+	}
+
+	err = workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(mwrSet)
+	assert.Nil(t, err)
+
+	mwLister := workInformerFactory.Work().V1().ManifestWorks().Lister()
+	pmwDeployController := deployReconciler{
+		workApplier:         workapplier.NewWorkApplierWithTypedClient(fWorkClient, mwLister),
+		manifestWorkLister:  mwLister,
+		placeDecisionLister: placementDecisionLister,
+		placementLister:     placementLister,
+	}
+
+	mwrSet, _, err = pmwDeployController.reconcile(context.TODO(), mwrSet)
+	assert.Nil(t, err)
+	assert.Equal(t, len(mwrSet.Status.PlacementsSummary), len(mwrSet.Spec.PlacementRefs))
+
+	// expect the manifestWorks for all clusters to be created
+	expectPlcSummary := workapiv1alpha1.PlacementSummary{
+		Name: placement.Name,
+		AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement.Status.DecisionGroups),
+			int(placement.Status.NumberOfSelectedClusters), placement.Status.NumberOfSelectedClusters),
+		Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total:       int(placement.Status.NumberOfSelectedClusters),
+			Applied:     0,
+			Available:   0,
+			Degraded:    0,
+			Progressing: 0,
+		},
+	}
+	actualPlcSummary := mwrSet.Status.PlacementsSummary[0]
+	assert.Equal(t, actualPlcSummary, expectPlcSummary)
+
+	// Check for the expected manifestWorkReplicaSetSummary
+	mwrSetSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       int(placement.Status.NumberOfSelectedClusters),
+		Applied:     0,
+		Available:   0,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSet.Status.Summary, mwrSetSummary)
+
+	// Check the PlacedManifestWork conditions
+	placeCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementVerified)
+	assert.NotNil(t, placeCondition)
+	assert.Equal(t, placeCondition.Status, metav1.ConditionTrue)
+	assert.Equal(t, placeCondition.Reason, workapiv1alpha1.ReasonAsExpected)
+
+	// Check the RollOut conditions
+	rollOutCondition := apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
+	assert.NotNil(t, rollOutCondition)
+	assert.Equal(t, rollOutCondition.Status, metav1.ConditionTrue)
+	assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonComplete)
+
+	// Change the mwrSet spec and re-run deploy.
+	mwTemplate := helpertest.CreateTestManifestWorkSpecWithSecret("v2", "test", "ns-test", "name-test")
+	mwTemplate.DeepCopyInto(&mwrSet.Spec.ManifestWorkTemplate)
+
+	mwrSet, _, _ = pmwDeployController.reconcile(context.TODO(), mwrSet)
+	assert.Equal(t, len(mwrSet.Status.PlacementsSummary), len(mwrSet.Spec.PlacementRefs))
+
+	// Since the mwrSet spec changed, we expect the rollout to start over with clsPerGroup clusters
+	expectPlcSummary = workapiv1alpha1.PlacementSummary{
+		Name: placement.Name,
+		AvailableDecisionGroups: getAvailableDecisionGroupProgressMessage(len(placement.Status.DecisionGroups),
+			clsPerGroup, placement.Status.NumberOfSelectedClusters),
+		Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+			Total:       clsPerGroup,
+			Applied:     0,
+			Available:   0,
+			Degraded:    0,
+			Progressing: 0,
+		},
+	}
+	actualPlcSummary = mwrSet.Status.PlacementsSummary[0]
+	assert.Equal(t, expectPlcSummary, actualPlcSummary)
+
+	// Check for the expected manifestWorkReplicaSetSummary
+	mwrSetSummary = workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       clsPerGroup,
+		Applied:     0,
+		Available:   0,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSet.Status.Summary, mwrSetSummary)
+
+	// Check the RollOut conditions
+	rollOutCondition = apimeta.FindStatusCondition(mwrSet.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionPlacementRolledOut)
+	assert.NotNil(t, rollOutCondition)
+	assert.Equal(t, rollOutCondition.Status, metav1.ConditionFalse)
+	assert.Equal(t, rollOutCondition.Reason, workapiv1alpha1.ReasonProgressing)
+}
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go
index 15aa50b2a..dca85b72c 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalizer_test.go
@@ -18,7 +18,7 @@ import (
 // Test finalize reconcile
 func TestFinalizeReconcile(t *testing.T) {
 	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
-	mw, _ := CreateManifestWork(mwrSetTest, "cluster1")
+	mw, _ := CreateManifestWork(mwrSetTest, "cluster1", "place-test")
 	fakeClient := fakeclient.NewSimpleClientset(mwrSetTest, mw)
 	manifestWorkInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fakeClient, 1*time.Second)
 	mwLister := manifestWorkInformerFactory.Work().V1().ManifestWorks().Lister()
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go
index b811d0422..c08c3fbc3 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go
@@ -18,7 +18,7 @@ import (
 func TestPlaceMWControllerIndex(t *testing.T) {
 	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
 	mwrSetTest.Status.Summary.Total = 1
-	mw, _ := CreateManifestWork(mwrSetTest, "cls1")
+	mw, _ := CreateManifestWork(mwrSetTest, "cls1", "place-test")

 	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest, mw)
 	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go
index a54eb8a81..331b412d5 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go
@@ -6,6 +6,7 @@ import (
 	apimeta "k8s.io/apimachinery/pkg/api/meta"

 	worklisterv1 "open-cluster-management.io/api/client/work/listers/work/v1"
+	"open-cluster-management.io/api/utils/work/v1/workapplier"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"
 )
@@ -29,33 +30,53 @@ func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha
 		return mwrSet, reconcileContinue, nil
 	}

-	manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(mwrSet, d.manifestWorkLister)
-	if err != nil {
-		return mwrSet, reconcileContinue, err
-	}
-
 	appliedCount, availableCount, degradCount, processingCount := 0, 0, 0, 0
-	for _, mw := range manifestWorks {
-		if !mw.DeletionTimestamp.IsZero() {
-			continue
+	for id, plcSummary := range mwrSet.Status.PlacementsSummary {
+		manifestWorks, err := listManifestWorksByMWRSetPlacementRef(mwrSet, plcSummary.Name, d.manifestWorkLister)
+		if err != nil {
+			return mwrSet, reconcileContinue, err
 		}
-		// applied condition
-		if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkApplied) {
-			appliedCount++
-		}
-		// Progressing condition
-		if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkProgressing) {
-			processingCount++
-		}
-		// Available condition
-		if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkAvailable) {
-			availableCount++
-		}
-		// Degraded condition
-		if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkDegraded) {
-			degradCount++
+		applied, available, degraded, processing := 0, 0, 0, 0
+		for _, mw := range manifestWorks {
+			if !mw.DeletionTimestamp.IsZero() {
+				continue
+			}
+
+			// If the ManifestWorkTemplate changed, the ManifestWork still needs to be updated; skip it so it
+			// is not counted as applied.
+			newMW := &workapiv1.ManifestWork{}
+			mw.ObjectMeta.DeepCopyInto(&newMW.ObjectMeta)
+			mwrSet.Spec.ManifestWorkTemplate.DeepCopyInto(&newMW.Spec)
+			if !workapplier.ManifestWorkEqual(newMW, mw) {
+				continue
+			}
+
+			// applied condition
+			if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkApplied) {
+				applied++
+			}
+			// Progressing condition
+			if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkProgressing) {
+				processing++
+			}
+			// Available condition
+			if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkAvailable) {
+				available++
+			}
+			// Degraded condition
+			if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkDegraded) {
+				degraded++
+			}
 		}
+		mwrSet.Status.PlacementsSummary[id].Summary.Applied = applied
+		mwrSet.Status.PlacementsSummary[id].Summary.Progressing = processing
+		mwrSet.Status.PlacementsSummary[id].Summary.Available = available
+		mwrSet.Status.PlacementsSummary[id].Summary.Degraded = degraded
+		// Add the per-placement counts to the manifestWorkReplicaSet totals
+		appliedCount = appliedCount + applied
+		processingCount = processingCount + processing
+		availableCount = availableCount + available
+		degradCount = degradCount + degraded
 	}

 	mwrSet.Status.Summary.Available = availableCount
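Both the deploy and status reconcilers repeat the same template-change gate. Factored out, it might look like the sketch below (a hypothetical helper, not part of this PR; it only mirrors the inlined logic):

```go
// templateUnchanged reports whether an existing ManifestWork still matches the
// ManifestWorkReplicaSet's template. A mismatch means the template changed since
// the work was applied, so the work is skipped by the counters and treated as
// not yet applied.
func templateUnchanged(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, mw *workapiv1.ManifestWork) bool {
	newMW := &workapiv1.ManifestWork{}
	mw.ObjectMeta.DeepCopyInto(&newMW.ObjectMeta)
	mwrSet.Spec.ManifestWorkTemplate.DeepCopyInto(&newMW.Spec)
	return workapplier.ManifestWorkEqual(newMW, mw)
}
```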
diff --git a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go
index a64ee757b..6ba49e8b3 100644
--- a/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go
+++ b/pkg/work/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go
@@ -5,6 +5,7 @@ import (
 	"testing"
 	"time"

+	"github.com/stretchr/testify/assert"
 	apimeta "k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -17,9 +18,19 @@ import (
 )

 func TestStatusReconcileAsExpected(t *testing.T) {
+	plcName := "place-test"
 	clusters := []string{"cls1", "cls2", "cls3", "cls4"}
-	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
+	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", plcName)
 	mwrSetTest.Status.Summary.Total = len(clusters)
+	mwrSetTest.Status.PlacementsSummary = []workapiv1alpha1.PlacementSummary{
+		{
+			Name:                    plcName,
+			AvailableDecisionGroups: "1",
+			Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+				Total: len(clusters),
+			},
+		},
+	}

 	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest)
 	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -29,7 +40,7 @@ func TestStatusReconcileAsExpected(t *testing.T) {
 	}

 	for _, cls := range clusters {
-		mw, _ := CreateManifestWork(mwrSetTest, cls)
+		mw, _ := CreateManifestWork(mwrSetTest, cls, plcName)

 		cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue)
 		apimeta.SetStatusCondition(&mw.Status.Conditions, cond)
@@ -82,9 +93,19 @@ func TestStatusReconcileAsExpected(t *testing.T) {
 }

 func TestStatusReconcileAsProcessing(t *testing.T) {
+	plcName := "place-test"
 	clusters := []string{"cls1", "cls2", "cls3", "cls4"}
-	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
+	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", plcName)
 	mwrSetTest.Status.Summary.Total = len(clusters)
+	mwrSetTest.Status.PlacementsSummary = []workapiv1alpha1.PlacementSummary{
+		{
+			Name:                    plcName,
+			AvailableDecisionGroups: "1",
+			Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+				Total: len(clusters),
+			},
+		},
+	}

 	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest)
 	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -94,7 +115,7 @@ func TestStatusReconcileAsProcessing(t *testing.T) {
 	}

 	for id, cls := range clusters {
-		mw, _ := CreateManifestWork(mwrSetTest, cls)
+		mw, _ := CreateManifestWork(mwrSetTest, cls, plcName)

 		cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue)
 		apimeta.SetStatusCondition(&mw.Status.Conditions, cond)
@@ -153,9 +174,19 @@ func TestStatusReconcileAsProcessing(t *testing.T) {
 }

 func TestStatusReconcileNotAsExpected(t *testing.T) {
+	plcName := "place-test"
 	clusters := []string{"cls1", "cls2", "cls3", "cls4"}
-	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "place-test")
+	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", plcName)
 	mwrSetTest.Status.Summary.Total = len(clusters)
+	mwrSetTest.Status.PlacementsSummary = []workapiv1alpha1.PlacementSummary{
+		{
+			Name:                    plcName,
+			AvailableDecisionGroups: "1",
+			Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+				Total: len(clusters),
+			},
+		},
+	}

 	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest)
 	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
@@ -166,7 +197,7 @@ func TestStatusReconcileNotAsExpected(t *testing.T) {

 	avaCount, processingCount, degradCount := 0, 0, 0
 	for id, cls := range clusters {
-		mw, _ := CreateManifestWork(mwrSetTest, cls)
+		mw, _ := CreateManifestWork(mwrSetTest, cls, plcName)

 		cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue)
 		apimeta.SetStatusCondition(&mw.Status.Conditions, cond)
@@ -229,3 +260,125 @@ func TestStatusReconcileNotAsExpected(t *testing.T) {
 		t.Fatal("Applied condition Reason not match NotAsExpected ", appliedCondition)
 	}
 }
+
+func TestStatusWithMultiPlacementsReconcileAsExpected(t *testing.T) {
+	placements := map[string][]string{"plc1": {"cls1", "cls2"}, "plc2": {"cls3", "cls4"}}
+	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", "plc1", "plc2")
+	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest)
+	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
+	count := 0
+	for plcName, clusters := range placements {
+		mwrSetTest.Status.PlacementsSummary = append(mwrSetTest.Status.PlacementsSummary, workapiv1alpha1.PlacementSummary{
+			Name: plcName,
+			Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+				Total: len(clusters),
+			}})
+		count = count + len(clusters)
+		for _, cls := range clusters {
+			mw := helpertest.CreateTestManifestWork(mwrSetTest.Name, mwrSetTest.Namespace, plcName, cls)
+			err := workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+			assert.Nil(t, err)
+		}
+	}
+	mwrSetTest.Status.Summary.Total = count
+	err := workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(mwrSetTest)
+	assert.Nil(t, err)
+
+	mwLister := workInformerFactory.Work().V1().ManifestWorks().Lister()
+	mwrSetStatusController := statusReconciler{
+		manifestWorkLister: mwLister,
+	}
+	mwrSetTest, _, err = mwrSetStatusController.reconcile(context.TODO(), mwrSetTest)
+	assert.Nil(t, err)
+
+	// Check for the expected Summary
+	mwrSetSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       count,
+		Applied:     count,
+		Available:   count,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSetTest.Status.Summary, mwrSetSummary)
+
+	// Check the ManifestworkApplied conditions
+	appliedCondition := apimeta.FindStatusCondition(mwrSetTest.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied)
+	assert.NotNil(t, appliedCondition)
+	assert.Equal(t, appliedCondition.Status, metav1.ConditionTrue)
+	assert.Equal(t, appliedCondition.Reason, workapiv1alpha1.ReasonAsExpected)
+}
+
+func TestStatusWithMWRSetSpecChangesReconcile(t *testing.T) {
+	plcName := "place-test"
+	clusters := []string{"cls1", "cls2", "cls3", "cls4"}
+	mwrSetTest := helpertest.CreateTestManifestWorkReplicaSet("mwrSet-test", "default", plcName)
+	mwrSetTest.Status.Summary.Total = len(clusters)
+	mwrSetTest.Status.PlacementsSummary = []workapiv1alpha1.PlacementSummary{
+		{
+			Name:                    plcName,
+			AvailableDecisionGroups: "1",
+			Summary: workapiv1alpha1.ManifestWorkReplicaSetSummary{
+				Total: len(clusters),
+			},
+		},
+	}
+
+	fWorkClient := fakeworkclient.NewSimpleClientset(mwrSetTest)
+	workInformerFactory := workinformers.NewSharedInformerFactoryWithOptions(fWorkClient, 1*time.Second)
+
+	err := workInformerFactory.Work().V1alpha1().ManifestWorkReplicaSets().Informer().GetStore().Add(mwrSetTest)
+	assert.Nil(t, err)
+
+	for _, cls := range clusters {
+		mw := helpertest.CreateTestManifestWork(mwrSetTest.Name, mwrSetTest.Namespace, plcName, cls)
+		err = workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw)
+		assert.Nil(t, err)
+	}
+
+	mwLister := workInformerFactory.Work().V1().ManifestWorks().Lister()
+	mwrSetStatusController := statusReconciler{
+		manifestWorkLister: mwLister,
+	}
+
+	mwrSetTest, _, err = mwrSetStatusController.reconcile(context.TODO(), mwrSetTest)
+	assert.Nil(t, err)
+
+	// Check for the expected Summary
+	mwrSetSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       len(clusters),
+		Applied:     len(clusters),
+		Available:   len(clusters),
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSetTest.Status.Summary, mwrSetSummary)
+
+	// Check the ManifestworkApplied conditions
+	appliedCondition := apimeta.FindStatusCondition(mwrSetTest.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied)
+	assert.NotNil(t, appliedCondition)
+	assert.Equal(t, appliedCondition.Status, metav1.ConditionTrue)
+	assert.Equal(t, appliedCondition.Reason, workapiv1alpha1.ReasonAsExpected)
+
+	// Change the mwrSet spec and re-run deploy.
+	mwTemplate := helpertest.CreateTestManifestWorkSpecWithSecret("v2", "kindtest", "ns-test", "name-test")
+	mwTemplate.DeepCopyInto(&mwrSetTest.Spec.ManifestWorkTemplate)
+
+	mwrSetTest, _, err = mwrSetStatusController.reconcile(context.TODO(), mwrSetTest)
+	assert.Nil(t, err)
+
+	// Check for the expected Summary
+	mwrSetSummary = workapiv1alpha1.ManifestWorkReplicaSetSummary{
+		Total:       len(clusters),
+		Applied:     0,
+		Available:   0,
+		Degraded:    0,
+		Progressing: 0,
+	}
+	assert.Equal(t, mwrSetTest.Status.Summary, mwrSetSummary)
+
+	// Check the ManifestworkApplied conditions
+	appliedCondition = apimeta.FindStatusCondition(mwrSetTest.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied)
+	assert.NotNil(t, appliedCondition)
+	assert.Equal(t, appliedCondition.Status, metav1.ConditionFalse)
+	assert.Equal(t, appliedCondition.Reason, workapiv1alpha1.ReasonNotAsExpected)
+}
diff --git a/pkg/work/hub/test/helper.go b/pkg/work/hub/test/helper.go
index bcdebbb04..c03dcb37c 100644
--- a/pkg/work/hub/test/helper.go
+++ b/pkg/work/hub/test/helper.go
@@ -6,7 +6,9 @@ import (
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/intstr"

+	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"
@@ -14,10 +16,34 @@ import (
 	"open-cluster-management.io/ocm/pkg/work/spoke/spoketesting"
 )

-func CreateTestManifestWorkReplicaSet(name string, ns string, placementName string) *workapiv1alpha1.ManifestWorkReplicaSet {
+func CreateTestManifestWorkReplicaSet(name string, ns string, placementNames ...string) *workapiv1alpha1.ManifestWorkReplicaSet {
+	placements := make(map[string]clusterv1alpha1.RolloutStrategy)
+	allRollOut := clusterv1alpha1.RolloutStrategy{
+		Type: clusterv1alpha1.All,
+		All: &clusterv1alpha1.RolloutAll{
+			Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+		},
+	}
+
+	for _, plcName := range placementNames {
+		placements[plcName] = allRollOut
+	}
+
+	return CreateTestManifestWorkReplicaSetWithRollOutStrategy(name, ns, placements)
+}
+
+func CreateTestManifestWorkReplicaSetWithRollOutStrategy(name string, ns string,
+	placements map[string]clusterv1alpha1.RolloutStrategy) *workapiv1alpha1.ManifestWorkReplicaSet {
 	obj := spoketesting.NewUnstructured("v1", "kind", "test-ns", "test-name")
 	mw, _ := spoketesting.NewManifestWork(0, obj)
-	placementRef := workapiv1alpha1.LocalPlacementReference{Name: placementName}
+	var placementRefs []workapiv1alpha1.LocalPlacementReference
+
+	for placementName, rollOut := range placements {
+		placementRefs = append(placementRefs, workapiv1alpha1.LocalPlacementReference{
+			Name:            placementName,
+			RolloutStrategy: rollOut,
+		})
+	}

 	mwrs := &workapiv1alpha1.ManifestWorkReplicaSet{
 		ObjectMeta: metav1.ObjectMeta{
@@ -28,13 +54,13 @@ func CreateTestManifestWorkReplicaSet(name string, ns string, placementName stri
 		},
 		Spec: workapiv1alpha1.ManifestWorkReplicaSetSpec{
 			ManifestWorkTemplate: mw.Spec,
-			PlacementRefs:        []workapiv1alpha1.LocalPlacementReference{placementRef},
+			PlacementRefs:        placementRefs,
 		},
 	}
 	return mwrs
 }

-func CreateTestManifestWorks(name, namespace string, clusters ...string) []runtime.Object {
+func CreateTestManifestWorks(name, namespace string, placementName string, clusters ...string) []runtime.Object {
"kind", "test-ns", "test-name") works := []runtime.Object{} for _, c := range clusters { @@ -43,6 +69,7 @@ func CreateTestManifestWorks(name, namespace string, clusters ...string) []runti mw.Namespace = c mw.Labels = map[string]string{ "work.open-cluster-management.io/manifestworkreplicaset": fmt.Sprintf("%s.%s", namespace, name), + "work.open-cluster-management.io/placementname": placementName, } meta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{ Type: workapiv1.WorkApplied, @@ -57,8 +84,55 @@ func CreateTestManifestWorks(name, namespace string, clusters ...string) []runti return works } +func CreateTestManifestWork(name, namespace string, placementName string, clusterName string) *workapiv1.ManifestWork { + obj := spoketesting.NewUnstructured("v1", "kind", "test-ns", "test-name") + mw, _ := spoketesting.NewManifestWork(0, obj) + mw.Name = name + mw.Namespace = clusterName + mw.Labels = map[string]string{ + "work.open-cluster-management.io/manifestworkreplicaset": fmt.Sprintf("%s.%s", namespace, name), + "work.open-cluster-management.io/placementname": placementName, + } + meta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{ + Type: workapiv1.WorkApplied, + Status: metav1.ConditionTrue, + }) + meta.SetStatusCondition(&mw.Status.Conditions, metav1.Condition{ + Type: workapiv1.WorkAvailable, + Status: metav1.ConditionTrue, + }) + + return mw +} + +func CreateTestManifestWorkSpecWithSecret(mApiVersion string, mKind string, mNS string, mName string) workapiv1.ManifestWorkSpec { + secret := spoketesting.NewUnstructuredSecret(mName, mNS, true, "0b1441ec-717f-4877-a165-27e5b59245f6") + obj := spoketesting.NewUnstructuredWithContent(mApiVersion, mKind, mNS, mName, secret.Object) + mw, _ := spoketesting.NewManifestWork(0, obj) + + return mw.Spec +} + // Return placement with predicate of label cluster name func CreateTestPlacement(name string, ns string, clusters ...string) (*clusterv1beta1.Placement, *clusterv1beta1.PlacementDecision) { + placement, placementDesicions := CreateTestPlacementWithDecisionStrategy(name, ns, len(clusters), clusters...) 
+
+	if len(placementDecisions) < 1 {
+		placementDecision := &clusterv1beta1.PlacementDecision{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      name + "-decision",
+				Namespace: ns,
+				Labels:    map[string]string{clusterv1beta1.PlacementLabel: name, clusterv1beta1.DecisionGroupIndexLabel: "0"},
+			},
+		}
+		return placement, placementDecision
+	}
+
+	return placement, placementDecisions[0]
+}
+
+func CreateTestPlacementWithDecisionStrategy(name string, ns string, clsPerDecisionGroup int,
+	clusters ...string) (*clusterv1beta1.Placement, []*clusterv1beta1.PlacementDecision) {
 	namereq := metav1.LabelSelectorRequirement{}
 	namereq.Key = "name"
 	namereq.Operator = metav1.LabelSelectorOpIn
@@ -74,32 +148,83 @@ func CreateTestPlacement(name string, ns string, clusters ...string) (*clusterv1
 		},
 	}

+	groupStrategy := clusterv1beta1.GroupStrategy{
+		ClustersPerDecisionGroup: intstr.FromInt(clsPerDecisionGroup),
+	}
+	decisionStrategy := clusterv1beta1.DecisionStrategy{
+		GroupStrategy: groupStrategy,
+	}
 	placement := &clusterv1beta1.Placement{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: ns,
 			Name:      name,
 		},
 		Spec: clusterv1beta1.PlacementSpec{
-			Predicates: []clusterv1beta1.ClusterPredicate{clusterPredicate},
+			Predicates:       []clusterv1beta1.ClusterPredicate{clusterPredicate},
+			DecisionStrategy: decisionStrategy,
 		},
 	}
+
+	var decisionGroups []clusterv1beta1.DecisionGroupStatus
+	var plcDecisions []*clusterv1beta1.PlacementDecision
+	clusterGroups := getClusterGroups(clusters, clsPerDecisionGroup)
+
+	for i, clsGroup := range clusterGroups {
+		plcDecisionName := fmt.Sprintf("%s-decision-%d", name, i)
+		placementDecision := &clusterv1beta1.PlacementDecision{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      plcDecisionName,
+				Namespace: ns,
+				Labels: map[string]string{clusterv1beta1.PlacementLabel: name,
+					clusterv1beta1.DecisionGroupIndexLabel: fmt.Sprintf("%d", i)},
+			},
+		}
+
+		decisions := []clusterv1beta1.ClusterDecision{}
+		for _, cls := range clsGroup {
+			decisions = append(decisions, clusterv1beta1.ClusterDecision{
+				ClusterName: cls,
+			})
+		}
+		placementDecision.Status.Decisions = decisions
+		plcDecisions = append(plcDecisions, placementDecision)
+
+		decisionGroupStatus := clusterv1beta1.DecisionGroupStatus{
+			DecisionGroupIndex: int32(i),
+			DecisionGroupName:  "",
+			Decisions:          []string{plcDecisionName},
+			ClustersCount:      int32(len(clsGroup)),
+		}
+
+		decisionGroups = append(decisionGroups, decisionGroupStatus)
+	}
+	placement.Status.NumberOfSelectedClusters = int32(len(clusters))
+	placement.Status.DecisionGroups = decisionGroups

-	placementDecision := &clusterv1beta1.PlacementDecision{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:      name + "-decision",
-			Namespace: ns,
-			Labels:    map[string]string{clusterv1beta1.PlacementLabel: name, clusterv1beta1.DecisionGroupIndexLabel: "0"},
-		},
+	return placement, plcDecisions
+}
+
+func getClusterGroups(clusters []string, clsPerDecisionGroup int) [][]string {
+	var clusterGroups [][]string
+
+	if clsPerDecisionGroup < 1 {
+		return clusterGroups
 	}

-	decisions := []clusterv1beta1.ClusterDecision{}
-	for _, cls := range clusters {
-		decisions = append(decisions, clusterv1beta1.ClusterDecision{
-			ClusterName: cls,
-		})
+	decisionGroupCount := (len(clusters) / clsPerDecisionGroup)
+	if len(clusters)%clsPerDecisionGroup > 0 {
+		decisionGroupCount = decisionGroupCount + 1
+	}
+
+	for i := 0; i < decisionGroupCount; i++ {
+		idx := i * clsPerDecisionGroup
+		idxLast := idx + clsPerDecisionGroup
+		if idxLast > len(clusters) {
+			idxLast = len(clusters)
+		}
+		clusterGroups = append(clusterGroups, clusters[idx:idxLast])
 	}
-	placementDecision.Status.Decisions = decisions

-	return placement, placementDecision
+	return clusterGroups
 }
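The grouping arithmetic above rounds up, so a trailing partial group is kept. A quick sketch of a unit test for it, assuming it lives in the same package as getClusterGroups (the values mirror the tests in this PR):

```go
import (
	"reflect"
	"testing"
)

func TestGetClusterGroups(t *testing.T) {
	// 5 clusters, 3 per group: one full group and one partial group.
	groups := getClusterGroups([]string{"cls1", "cls2", "cls3", "cls4", "cls5"}, 3)
	want := [][]string{{"cls1", "cls2", "cls3"}, {"cls4", "cls5"}}
	if !reflect.DeepEqual(groups, want) {
		t.Fatalf("expected %v, got %v", want, groups)
	}
}
```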
diff --git a/test/e2e/manifestworkreplicaset_test.go b/test/e2e/manifestworkreplicaset_test.go
index 3472761da..107280311 100644
--- a/test/e2e/manifestworkreplicaset_test.go
+++ b/test/e2e/manifestworkreplicaset_test.go
@@ -12,6 +12,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/rand"

 	clusterapiv1 "open-cluster-management.io/api/cluster/v1"
+	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
 	clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2"
 	workapiv1 "open-cluster-management.io/api/work/v1"
@@ -53,7 +54,15 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
 			util.NewConfigmap(ns1, "cm1", nil, nil),
 			util.NewConfigmap(ns1, "cm2", nil, nil),
 			newNamespace(ns1))
-		placementRef := workapiv1alpha1.LocalPlacementReference{Name: "placement-test"}
+		placementRef := workapiv1alpha1.LocalPlacementReference{
+			Name: "placement-test",
+			RolloutStrategy: clusterv1alpha1.RolloutStrategy{
+				Type: clusterv1alpha1.All,
+				All: &clusterv1alpha1.RolloutAll{
+					Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+				},
+			},
+		}
 		manifestWorkReplicaSet := &workapiv1alpha1.ManifestWorkReplicaSet{
 			ObjectMeta: metav1.ObjectMeta{
 				GenerateName: "mwrset-",
@@ -261,7 +270,15 @@ var _ = ginkgo.Describe("Test ManifestWorkReplicaSet", func() {
 		ginkgo.By("Create manifestWorkReplicaSet")
 		manifest := workapiv1.Manifest{}
 		manifest.Object = util.NewConfigmap("default", "cm", map[string]string{"a": "b"}, nil)
-		placementRef := workapiv1alpha1.LocalPlacementReference{Name: placementName}
+		placementRef := workapiv1alpha1.LocalPlacementReference{
+			Name: placementName,
+			RolloutStrategy: clusterv1alpha1.RolloutStrategy{
+				Type: clusterv1alpha1.All,
+				All: &clusterv1alpha1.RolloutAll{
+					Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+				},
+			},
+		}
 		mwReplicaSet := &workapiv1alpha1.ManifestWorkReplicaSet{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: mwReplicaSetName,
diff --git a/test/integration/work/manifestworkreplicaset_test.go b/test/integration/work/manifestworkreplicaset_test.go
index 647d4f623..914cb094c 100644
--- a/test/integration/work/manifestworkreplicaset_test.go
+++ b/test/integration/work/manifestworkreplicaset_test.go
@@ -12,6 +12,7 @@ import (
 	utilrand "k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/sets"

+	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
 	clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
 	workapiv1 "open-cluster-management.io/api/work/v1"
 	workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1"
@@ -55,7 +56,15 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() {
 			manifests := []workapiv1.Manifest{
 				util.ToManifest(util.NewConfigmap("defaut", cm1, map[string]string{"a": "b"}, nil)),
 			}
-			placementRef := workapiv1alpha1.LocalPlacementReference{Name: placement.Name}
+			placementRef := workapiv1alpha1.LocalPlacementReference{
+				Name: placement.Name,
+				RolloutStrategy: clusterv1alpha1.RolloutStrategy{
+					Type: clusterv1alpha1.All,
+					All: &clusterv1alpha1.RolloutAll{
+						Timeout: clusterv1alpha1.Timeout{Timeout: "None"},
+					},
+				},
+			}
 			manifestWorkReplicaSet := &workapiv1alpha1.ManifestWorkReplicaSet{
 				ObjectMeta: metav1.ObjectMeta{