From 0d3dff3f25bdd827081b9b7eae86c0505736739c Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Wed, 15 Mar 2023 21:40:47 +0800 Subject: [PATCH] Add e2e/integration test case for manifestworkreplicaset (#191) * Add e2e/integration test case for manifestworkreplicaset Signed-off-by: Jian Qiu * Fix rbac in e2e test Signed-off-by: Jian Qiu --------- Signed-off-by: Jian Qiu --- deploy/hub/clusterrole.yaml | 13 +- deploy/hub/kustomization.yaml | 4 + deploy/hub/placementdecisions.crd.yaml | 60 ++++ deploy/hub/placements.crd.yaml | 303 ++++++++++++++++++ deploy/hub/role.yaml | 17 + deploy/hub/role_binding.yaml | 13 + .../manifestworkreplicaset_controller.go | 23 +- ...manifestworkreplicaset_deploy_reconcile.go | 12 +- ...nifestworkreplicaset_finalize_reconcile.go | 12 +- .../manifestworkreplicaset_index.go | 8 +- .../manifestworkreplicaset_index_test.go | 2 +- ...manifestworkreplicaset_status_reconcile.go | 23 +- .../manifestworkreplicaset_status_test.go | 18 +- test/e2e/manifestworkreplicaset_test.go | 97 +++++- test/e2e/suite_test.go | 5 + .../manifestworkreplicaset_test.go | 184 ++++++++++- test/integration/suite_test.go | 23 +- 17 files changed, 746 insertions(+), 71 deletions(-) create mode 100644 deploy/hub/placementdecisions.crd.yaml create mode 100644 deploy/hub/placements.crd.yaml create mode 100644 deploy/hub/role.yaml create mode 100644 deploy/hub/role_binding.yaml diff --git a/deploy/hub/clusterrole.yaml b/deploy/hub/clusterrole.yaml index 58d0b7fd3..2455eb14f 100644 --- a/deploy/hub/clusterrole.yaml +++ b/deploy/hub/clusterrole.yaml @@ -3,20 +3,23 @@ kind: ClusterRole metadata: name: open-cluster-management:work-hub-controller rules: -# Allow get/list/watch configmaps -- apiGroups: [""] - resources: ["configmaps"] - verbs: ["get", "list", "watch"] +- apiGroups: [ "" ] + resources: [ "configmaps" ] + verbs: [ "get", "list", "watch" ] # Allow create subjectaccessreviews - apiGroups: ["authorization.k8s.io"] resources: ["subjectaccessreviews"] verbs: ["create"] - apiGroups: ["work.open-cluster-management.io"] resources: ["manifestworks"] - verbs: ["get", "list", "watch","create", "update", "delete", "deletecollection", "patch"] + verbs: ["get", "list", "watch","create", "update", "delete", "deletecollection", "patch", "execute-as"] - apiGroups: ["work.open-cluster-management.io"] resources: ["manifestworkreplicasets"] verbs: ["get", "list", "watch", "update"] - apiGroups: ["work.open-cluster-management.io"] resources: ["manifestworkreplicasets/status"] verbs: ["patch", "update"] +- apiGroups: [ "cluster.open-cluster-management.io" ] + resources: [ "placements", "placementdecisions" ] + verbs: [ "get", "list", "watch"] + diff --git a/deploy/hub/kustomization.yaml b/deploy/hub/kustomization.yaml index 77809ef25..1f8c2f1cd 100644 --- a/deploy/hub/kustomization.yaml +++ b/deploy/hub/kustomization.yaml @@ -4,9 +4,13 @@ namespace: open-cluster-management-hub resources: - ./manifestworkreplicasets.crd.yaml - ./manifestworks.crd.yaml +- ./placements.crd.yaml +- ./placementdecisions.crd.yaml - ./component_namespace.yaml - ./clusterrole_binding.yaml - ./clusterrole.yaml +- ./role.yaml +- ./role_binding.yaml - ./manager_deployment.yaml - ./webhook_deployment.yaml - ./service_account.yaml diff --git a/deploy/hub/placementdecisions.crd.yaml b/deploy/hub/placementdecisions.crd.yaml new file mode 100644 index 000000000..2a7353118 --- /dev/null +++ b/deploy/hub/placementdecisions.crd.yaml @@ -0,0 +1,60 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: 
placementdecisions.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: PlacementDecision + listKind: PlacementDecisionList + plural: placementdecisions + singular: placementdecision + scope: Namespaced + preserveUnknownFields: false + versions: + - name: v1beta1 + schema: + openAPIV3Schema: + description: "PlacementDecision indicates a decision from a placement PlacementDecision should has a label cluster.open-cluster-management.io/placement={placement name} to reference a certain placement. \n If a placement has spec.numberOfClusters specified, the total number of decisions contained in status.decisions of PlacementDecisions should always be NumberOfClusters; otherwise, the total number of decisions should be the number of ManagedClusters which match the placement requirements. \n Some of the decisions might be empty when there are no enough ManagedClusters meet the placement requirements." + type: object + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + status: + description: Status represents the current status of the PlacementDecision + type: object + required: + - decisions + properties: + decisions: + description: Decisions is a slice of decisions according to a placement The number of decisions should not be larger than 100 + type: array + items: + description: ClusterDecision represents a decision from a placement An empty ClusterDecision indicates it is not scheduled yet. + type: object + required: + - clusterName + - reason + properties: + clusterName: + description: ClusterName is the name of the ManagedCluster. If it is not empty, its value should be unique cross all placement decisions for the Placement. + type: string + reason: + description: Reason represents the reason why the ManagedCluster is selected. 
+ type: string + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/hub/placements.crd.yaml b/deploy/hub/placements.crd.yaml new file mode 100644 index 000000000..2342ed799 --- /dev/null +++ b/deploy/hub/placements.crd.yaml @@ -0,0 +1,303 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: placements.cluster.open-cluster-management.io +spec: + group: cluster.open-cluster-management.io + names: + kind: Placement + listKind: PlacementList + plural: placements + singular: placement + scope: Namespaced + preserveUnknownFields: false + versions: + - additionalPrinterColumns: + - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].status + name: Succeeded + type: string + - jsonPath: .status.conditions[?(@.type=="PlacementSatisfied")].reason + name: Reason + type: string + - jsonPath: .status.numberOfSelectedClusters + name: SelectedClusters + type: integer + name: v1beta1 + schema: + openAPIV3Schema: + description: "Placement defines a rule to select a set of ManagedClusters from the ManagedClusterSets bound to the placement namespace. \n Here is how the placement policy combines with other selection methods to determine a matching list of ManagedClusters: 1. Kubernetes clusters are registered with hub as cluster-scoped ManagedClusters; 2. ManagedClusters are organized into cluster-scoped ManagedClusterSets; 3. ManagedClusterSets are bound to workload namespaces; 4. Namespace-scoped Placements specify a slice of ManagedClusterSets which select a working set of potential ManagedClusters; 5. Then Placements subselect from that working set using label/claim selection. \n No ManagedCluster will be selected if no ManagedClusterSet is bound to the placement namespace. User is able to bind a ManagedClusterSet to a namespace by creating a ManagedClusterSetBinding in that namespace if they have a RBAC rule to CREATE on the virtual subresource of `managedclustersets/bind`. \n A slice of PlacementDecisions with label cluster.open-cluster-management.io/placement={placement name} will be created to represent the ManagedClusters selected by this placement. \n If a ManagedCluster is selected and added into the PlacementDecisions, other components may apply workload on it; once it is removed from the PlacementDecisions, the workload applied on this ManagedCluster should be evicted accordingly." + type: object + required: + - spec + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: Spec defines the attributes of Placement. + type: object + properties: + clusterSets: + description: ClusterSets represent the ManagedClusterSets from which the ManagedClusters are selected. 
If the slice is empty, ManagedClusters will be selected from the ManagedClusterSets bound to the placement namespace, otherwise ManagedClusters will be selected from the intersection of this slice and the ManagedClusterSets bound to the placement namespace. + type: array + items: + type: string + numberOfClusters: + description: NumberOfClusters represents the desired number of ManagedClusters to be selected which meet the placement requirements. 1) If not specified, all ManagedClusters which meet the placement requirements (including ClusterSets, and Predicates) will be selected; 2) Otherwise if the nubmer of ManagedClusters meet the placement requirements is larger than NumberOfClusters, a random subset with desired number of ManagedClusters will be selected; 3) If the nubmer of ManagedClusters meet the placement requirements is equal to NumberOfClusters, all of them will be selected; 4) If the nubmer of ManagedClusters meet the placement requirements is less than NumberOfClusters, all of them will be selected, and the status of condition `PlacementConditionSatisfied` will be set to false; + type: integer + format: int32 + predicates: + description: Predicates represent a slice of predicates to select ManagedClusters. The predicates are ORed. + type: array + items: + description: ClusterPredicate represents a predicate to select ManagedClusters. + type: object + properties: + requiredClusterSelector: + description: RequiredClusterSelector represents a selector of ManagedClusters by label and claim. If specified, 1) Any ManagedCluster, which does not match the selector, should not be selected by this ClusterPredicate; 2) If a selected ManagedCluster (of this ClusterPredicate) ceases to match the selector (e.g. due to an update) of any ClusterPredicate, it will be eventually removed from the placement decisions; 3) If a ManagedCluster (not selected previously) starts to match the selector, it will either be selected or at least has a chance to be selected (when NumberOfClusters is specified); + type: object + properties: + claimSelector: + description: ClaimSelector represents a selector of ManagedClusters by clusterClaims in status + type: object + properties: + matchExpressions: + description: matchExpressions is a list of cluster claim selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + labelSelector: + description: LabelSelector represents a selector of ManagedClusters by label + type: object + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + type: array + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ type: object + required: + - key + - operator + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + type: array + items: + type: string + matchLabels: + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + additionalProperties: + type: string + prioritizerPolicy: + description: PrioritizerPolicy defines the policy of the prioritizers. If this field is unset, then default prioritizer mode and configurations are used. Referring to PrioritizerPolicy to see more description about Mode and Configurations. + type: object + properties: + configurations: + type: array + items: + description: PrioritizerConfig represents the configuration of prioritizer + type: object + required: + - scoreCoordinate + properties: + scoreCoordinate: + description: ScoreCoordinate represents the configuration of the prioritizer and score source. + type: object + required: + - type + properties: + addOn: + description: When type is "AddOn", AddOn defines the resource name and score name. + type: object + required: + - resourceName + - scoreName + properties: + resourceName: + description: ResourceName defines the resource name of the AddOnPlacementScore. The placement prioritizer selects AddOnPlacementScore CR by this name. + type: string + scoreName: + description: ScoreName defines the score name inside AddOnPlacementScore. AddOnPlacementScore contains a list of score name and score value, ScoreName specify the score to be used by the prioritizer. + type: string + builtIn: + description: 'BuiltIn defines the name of a BuiltIn prioritizer. Below are the valid BuiltIn prioritizer names. 1) Balance: balance the decisions among the clusters. 2) Steady: ensure the existing decision is stabilized. 3) ResourceAllocatableCPU & ResourceAllocatableMemory: sort clusters based on the allocatable. 4) Spread: spread the workload evenly to topologies.' + type: string + type: + description: Type defines the type of the prioritizer score. Type is either "BuiltIn", "AddOn" or "", where "" is "BuiltIn" by default. When the type is "BuiltIn", need to specify a BuiltIn prioritizer name in BuiltIn. When the type is "AddOn", need to configure the score source in AddOn. + type: string + default: BuiltIn + enum: + - BuiltIn + - AddOn + weight: + description: Weight defines the weight of the prioritizer score. The value must be ranged in [-10,10]. Each prioritizer will calculate an integer score of a cluster in the range of [-100, 100]. The final score of a cluster will be sum(weight * prioritizer_score). A higher weight indicates that the prioritizer weights more in the cluster selection, while 0 weight indicates that the prioritizer is disabled. A negative weight indicates wants to select the last ones. 
+ type: integer + format: int32 + default: 1 + maximum: 10 + minimum: -10 + mode: + description: Mode is either Exact, Additive, "" where "" is Additive by default. In Additive mode, any prioritizer not explicitly enumerated is enabled in its default Configurations, in which Steady and Balance prioritizers have the weight of 1 while other prioritizers have the weight of 0. Additive doesn't require configuring all prioritizers. The default Configurations may change in the future, and additional prioritization will happen. In Exact mode, any prioritizer not explicitly enumerated is weighted as zero. Exact requires knowing the full set of prioritizers you want, but avoids behavior changes between releases. + type: string + default: Additive + spreadPolicy: + description: SpreadPolicy defines how placement decisions should be distributed among a set of ManagedClusters. + type: object + properties: + spreadConstraints: + description: SpreadConstraints defines how the placement decision should be distributed among a set of ManagedClusters. The importance of the SpreadConstraintsTerms follows the natural order of their index in the slice. The scheduler first consider SpreadConstraintsTerms with smaller index then those with larger index to distribute the placement decision. + type: array + maxItems: 8 + items: + description: SpreadConstraintsTerm defines a terminology to spread placement decisions. + type: object + required: + - topologyKey + - topologyKeyType + properties: + maxSkew: + description: MaxSkew represents the degree to which the workload may be unevenly distributed. Skew is the maximum difference between the number of selected ManagedClusters in a topology and the global minimum. The global minimum is the minimum number of selected ManagedClusters for the topologies within the same TopologyKey. The minimum possible value of MaxSkew is 1, and the default value is 1. + type: integer + format: int32 + default: 1 + minimum: 1 + topologyKey: + description: TopologyKey is either a label key or a cluster claim name of ManagedClusters. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]$ + topologyKeyType: + description: TopologyKeyType indicates the type of TopologyKey. It could be Label or Claim. + type: string + enum: + - Label + - Claim + whenUnsatisfiable: + description: WhenUnsatisfiable represents the action of the scheduler when MaxSkew cannot be satisfied. It could be DoNotSchedule or ScheduleAnyway. The default value is ScheduleAnyway. DoNotSchedule instructs the scheduler not to schedule more ManagedClusters when MaxSkew is not satisfied. ScheduleAnyway instructs the scheduler to keep scheduling even if MaxSkew is not satisfied. + type: string + default: ScheduleAnyway + enum: + - DoNotSchedule + - ScheduleAnyway + tolerations: + description: Tolerations are applied to placements, and allow (but do not require) the managed clusters with certain taints to be selected by placements with matching tolerations. + type: array + items: + description: Toleration represents the toleration object that can be attached to a placement. The placement this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + type: object + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSelect, PreferNoSelect and NoSelectIfNew. 
+ type: string + enum: + - NoSelect + - PreferNoSelect + - NoSelectIfNew + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a placement can tolerate all taints of a particular category. + type: string + default: Equal + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoSelect/PreferNoSelect, otherwise this field is ignored) tolerates the taint. The default value is nil, which indicates it tolerates the taint forever. The start time of counting the TolerationSeconds should be the TimeAdded in Taint, not the cluster scheduled time or TolerationSeconds added time. + type: integer + format: int64 + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + maxLength: 1024 + status: + description: Status represents the current status of the Placement + type: object + properties: + conditions: + description: Conditions contains the different condition status for this Placement. + type: array + items: + description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions. For example, type FooStatus struct{ // Represents the observations of a foo's current state. // Known .status.conditions.type are: \"Available\", \"Progressing\", and \"Degraded\" // +patchMergeKey=type // +patchStrategy=merge // +listType=map // +listMapKey=type Conditions []metav1.Condition `json:\"conditions,omitempty\" patchStrategy:\"merge\" patchMergeKey:\"type\" protobuf:\"bytes,1,rep,name=conditions\"` \n // other fields }" + type: object + required: + - lastTransitionTime + - message + - reason + - status + - type + properties: + lastTransitionTime: + description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + type: string + format: date-time + message: + description: message is a human readable message indicating details about the transition. This may be an empty string. + type: string + maxLength: 32768 + observedGeneration: + description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. + type: integer + format: int64 + minimum: 0 + reason: + description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. 
+ type: string + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + status: + description: status of the condition, one of True, False, Unknown. + type: string + enum: + - "True" + - "False" + - Unknown + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt) + type: string + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + numberOfSelectedClusters: + description: NumberOfSelectedClusters represents the number of selected ManagedClusters + type: integer + format: int32 + served: true + storage: true + subresources: + status: {} +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/hub/role.yaml b/deploy/hub/role.yaml new file mode 100644 index 000000000..b8f4a01bb --- /dev/null +++ b/deploy/hub/role.yaml @@ -0,0 +1,17 @@ +# Mandatory Role permission for work agent +# Work agent can not run without these permissions +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: open-cluster-management:work-hub-controller +rules: +# leader election needs to operate configmaps +- apiGroups: [ "" ] + resources: [ "configmaps" ] + verbs: [ "get", "list", "watch", "create", "delete", "update", "patch" ] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["create", "get", "list", "update", "watch", "patch"] +- apiGroups: ["", "events.k8s.io"] + resources: ["events"] + verbs: ["create", "patch", "update"] diff --git a/deploy/hub/role_binding.yaml b/deploy/hub/role_binding.yaml new file mode 100644 index 000000000..9b4e83149 --- /dev/null +++ b/deploy/hub/role_binding.yaml @@ -0,0 +1,13 @@ +# RoleBinding for work mandatory permissions. 
+apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: open-cluster-management:work-hub-controller +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: open-cluster-management:work-hub-controller +subjects: + - kind: ServiceAccount + name: work-hub-sa + namespace: open-cluster-management-hub diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go index b7b43070c..e512e2a12 100644 --- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go +++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_controller.go @@ -11,7 +11,9 @@ import ( "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -21,9 +23,12 @@ import ( workclientset "open-cluster-management.io/api/client/work/clientset/versioned" workinformerv1 "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" workinformerv1alpha1 "open-cluster-management.io/api/client/work/informers/externalversions/work/v1alpha1" + worklisterv1 "open-cluster-management.io/api/client/work/listers/work/v1" worklisterv1alpha1 "open-cluster-management.io/api/client/work/listers/work/v1alpha1" "open-cluster-management.io/api/utils/work/v1/workapplier" + workapiv1 "open-cluster-management.io/api/work/v1" workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" + "strings" ) const ( @@ -100,11 +105,15 @@ func NewManifestWorkReplicaSetController( }, manifestWorkReplicaSetInformer.Informer()). WithFilteredEventsInformersQueueKeyFunc(func(obj runtime.Object) string { accessor, _ := meta.Accessor(obj) - key, ok := accessor.GetLabels()[ManifestWorkReplicaSetControllerNameLabelKey] + labelValue, ok := accessor.GetLabels()[ManifestWorkReplicaSetControllerNameLabelKey] if !ok { return "" } - return key + keys := strings.Split(labelValue, ".") + if len(keys) != 2 { + return "" + } + return fmt.Sprintf("%s/%s", keys[0], keys[1]) }, func(obj interface{}) bool { accessor, err := meta.Accessor(obj) if err != nil { @@ -193,3 +202,13 @@ func (m *ManifestWorkReplicaSetController) patchPlaceManifestStatus(ctx context. 
_, err = m.workClient.WorkV1alpha1().ManifestWorkReplicaSets(old.Namespace).Patch(ctx, old.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } + +func listManifestWorksByManifestWorkReplicaSet(mwrs *workapiv1alpha1.ManifestWorkReplicaSet, manifestWorkLister worklisterv1.ManifestWorkLister) ([]*workapiv1.ManifestWork, error) { + req, err := labels.NewRequirement(ManifestWorkReplicaSetControllerNameLabelKey, selection.Equals, []string{manifestWorkReplicaSetKey(mwrs)}) + if err != nil { + return nil, err + } + + selector := labels.NewSelector().Add(*req) + return manifestWorkLister.List(selector) +} diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go index 0a9ea7e6e..3aafe389c 100644 --- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go +++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_deploy_reconcile.go @@ -6,8 +6,6 @@ import ( "k8s.io/apimachinery/pkg/api/errors" apimeta "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" @@ -41,13 +39,7 @@ func (d *deployReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha placements = append(placements, placement) } - req, err := labels.NewRequirement(ManifestWorkReplicaSetControllerNameLabelKey, selection.In, []string{mwrSet.Name}) - if err != nil { - return mwrSet, reconcileContinue, err - } - - selector := labels.NewSelector().Add(*req) - manifestWorks, err := d.manifestWorkLister.List(selector) + manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(mwrSet, d.manifestWorkLister) if err != nil { return mwrSet, reconcileContinue, err } @@ -168,7 +160,7 @@ func CreateManifestWork(mwrSet *workapiv1alpha1.ManifestWorkReplicaSet, clusterN ObjectMeta: metav1.ObjectMeta{ Name: mwrSet.Name, Namespace: clusterNS, - Labels: map[string]string{ManifestWorkReplicaSetControllerNameLabelKey: mwrSet.Name}, + Labels: map[string]string{ManifestWorkReplicaSetControllerNameLabelKey: manifestWorkReplicaSetKey(mwrSet)}, }, Spec: mwrSet.Spec.ManifestWorkTemplate}, nil } diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go index 5ffc3336e..10590fd75 100644 --- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go +++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_finalize_reconcile.go @@ -3,8 +3,6 @@ package manifestworkreplicasetcontroller import ( "context" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" utilerrors "k8s.io/apimachinery/pkg/util/errors" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" worklisterv1 "open-cluster-management.io/api/client/work/listers/work/v1" @@ -32,7 +30,7 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp break } } - // if there is no finalizer, we do not need to reconcile anymore and we do not need to + // if there is no 
finalizer, we do not need to reconcile anymore.
 	if !found {
 		return mwrSet, reconcileStop, nil
 	}
@@ -53,13 +51,7 @@ func (f *finalizeReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alp
 }
 
 func (m *finalizeReconciler) finalizeManifestWorkReplicaSet(ctx context.Context, manifestWorkReplicaSet *workapiv1alpha1.ManifestWorkReplicaSet) error {
-	req, err := labels.NewRequirement(ManifestWorkReplicaSetControllerNameLabelKey, selection.In, []string{manifestWorkReplicaSet.Name})
-	if err != nil {
-		return err
-	}
-
-	selector := labels.NewSelector().Add(*req)
-	manifestWorks, err := m.manifestWorkLister.List(selector)
+	manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(manifestWorkReplicaSet, m.manifestWorkLister)
 	if err != nil {
 		return err
 	}
diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index.go
index 5f1af75ec..718420460 100644
--- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index.go
+++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index.go
@@ -62,7 +62,7 @@ func (m *ManifestWorkReplicaSetController) placementDecisionQueueKeysFunc(obj ru
 	return keys
 }
 
-// we will generate manifestwork with a lab
+// we will generate manifestwork with a label
 func (m *ManifestWorkReplicaSetController) manifestWorkQueueKeyFunc(obj runtime.Object) string {
 	accessor, _ := meta.Accessor(obj)
 	key, ok := accessor.GetLabels()[ManifestWorkReplicaSetControllerNameLabelKey]
@@ -87,3 +87,9 @@ func indexManifestWorkReplicaSetByPlacement(obj interface{}) ([]string, error) {
 
 	return keys, nil
 }
+
+// manifestWorkReplicaSetKey returns the key of the given manifestworkreplicaset in a form that
+// complies with the label value format.
+func manifestWorkReplicaSetKey(mwrs *workapiv1alpha1.ManifestWorkReplicaSet) string { + return fmt.Sprintf("%s.%s", mwrs.Namespace, mwrs.Name) +} diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go index e04f43731..5f3aa5740 100644 --- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go +++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_index_test.go @@ -108,7 +108,7 @@ func TestPlaceMWControllerIndex(t *testing.T) { // Check manifestWork Queue Keys key := pmwController.manifestWorkQueueKeyFunc(mw) - if key != mwrSetTest.Name { + if key != mwrSetTest.Namespace+"."+mwrSetTest.Name { t.Fatal("Expected manifestwork key not match", key, " - ", mwrSetTest.Name) } // Check manifestWork Queue Keys not exist diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go index 354124f7f..f7617ae51 100644 --- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go +++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_reconcile.go @@ -3,9 +3,6 @@ package manifestworkreplicasetcontroller import ( "context" apimeta "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" worklisterv1 "open-cluster-management.io/api/client/work/listers/work/v1" workapiv1 "open-cluster-management.io/api/work/v1" workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" @@ -29,13 +26,7 @@ func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha return mwrSet, reconcileContinue, nil } - req, err := labels.NewRequirement(ManifestWorkReplicaSetControllerNameLabelKey, selection.In, []string{mwrSet.Name}) - if err != nil { - return mwrSet, reconcileContinue, err - } - - selector := labels.NewSelector().Add(*req) - manifestWorks, err := d.manifestWorkLister.List(selector) + manifestWorks, err := listManifestWorksByManifestWorkReplicaSet(mwrSet, d.manifestWorkLister) if err != nil { return mwrSet, reconcileContinue, err } @@ -47,23 +38,19 @@ func (d *statusReconciler) reconcile(ctx context.Context, mwrSet *workapiv1alpha } // applied condition - condition := apimeta.FindStatusCondition(mw.Status.Conditions, string(workapiv1.ManifestApplied)) - if condition != nil && condition.Status == metav1.ConditionTrue { + if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkApplied) { appliedCount++ } // Progressing condition - condition = apimeta.FindStatusCondition(mw.Status.Conditions, string(workapiv1.ManifestProgressing)) - if condition != nil && condition.Status == metav1.ConditionTrue { + if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkProgressing) { processingCount++ } // Available condition - condition = apimeta.FindStatusCondition(mw.Status.Conditions, string(workapiv1.ManifestAvailable)) - if condition != nil && condition.Status == metav1.ConditionTrue { + if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkAvailable) { availableCount++ } // Degraded condition - condition = apimeta.FindStatusCondition(mw.Status.Conditions, string(workapiv1.ManifestDegraded)) - if condition != nil && condition.Status == metav1.ConditionTrue 
{ + if apimeta.IsStatusConditionTrue(mw.Status.Conditions, workapiv1.WorkDegraded) { degradCount++ } } diff --git a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go index f969f71fe..ff699c457 100644 --- a/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go +++ b/pkg/hub/controllers/manifestworkreplicasetcontroller/manifestworkreplicaset_status_test.go @@ -27,10 +27,10 @@ func TestStatusReconcileAsExpected(t *testing.T) { for _, cls := range clusters { mw, _ := CreateManifestWork(mwrSetTest, cls) - cond := getCondition(string(workv1.ManifestApplied), "", "", metav1.ConditionTrue) + cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) - cond = getCondition(string(workv1.ManifestAvailable), "", "", metav1.ConditionTrue) + cond = getCondition(workv1.WorkAvailable, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) if err := workInformerFactory.Work().V1().ManifestWorks().Informer().GetStore().Add(mw); err != nil { t.Fatal(err) @@ -92,14 +92,14 @@ func TestStatusReconcileAsProcessing(t *testing.T) { for id, cls := range clusters { mw, _ := CreateManifestWork(mwrSetTest, cls) - cond := getCondition(string(workv1.ManifestApplied), "", "", metav1.ConditionTrue) + cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) if id%2 == 0 { - cond = getCondition(string(workv1.ManifestAvailable), "", "", metav1.ConditionTrue) + cond = getCondition(workv1.WorkAvailable, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) } else { - cond = getCondition(string(workv1.ManifestProgressing), "", "", metav1.ConditionTrue) + cond = getCondition(workv1.WorkProgressing, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) } @@ -164,19 +164,19 @@ func TestStatusReconcileNotAsExpected(t *testing.T) { avaCount, processingCount, degradCount := 0, 0, 0 for id, cls := range clusters { mw, _ := CreateManifestWork(mwrSetTest, cls) - cond := getCondition(string(workv1.ManifestApplied), "", "", metav1.ConditionTrue) + cond := getCondition(workv1.WorkApplied, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) if id%2 == 0 { - cond = getCondition(string(workv1.ManifestAvailable), "", "", metav1.ConditionTrue) + cond = getCondition(workv1.WorkAvailable, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) avaCount++ } else if id%3 == 0 { - cond = getCondition(string(workv1.ManifestDegraded), "", "", metav1.ConditionTrue) + cond = getCondition(workv1.WorkDegraded, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) processingCount++ } else { - cond = getCondition(string(workv1.ManifestProgressing), "", "", metav1.ConditionTrue) + cond = getCondition(workv1.WorkProgressing, "", "", metav1.ConditionTrue) apimeta.SetStatusCondition(&mw.Status.Conditions, cond) degradCount++ } diff --git a/test/e2e/manifestworkreplicaset_test.go b/test/e2e/manifestworkreplicaset_test.go index 054f9fbff..884095f9b 100644 --- a/test/e2e/manifestworkreplicaset_test.go +++ b/test/e2e/manifestworkreplicaset_test.go @@ -2,33 +2,120 @@ package e2e import ( "context" + "fmt" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" + 
"k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" ) var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { var err error + var nameSuffix string + + ginkgo.BeforeEach(func() { + nameSuffix = rand.String(5) + }) + ginkgo.Context("Creating a ManifestWorkReplicaSet", func() { ginkgo.It("Should create ManifestWorkReplicaSet successfullt", func() { - work := newManifestWork("", "", []runtime.Object{newConfigmap("default", "cm1", nil, nil)}...) + ginkgo.By("create manifestworkreplicaset") + ns1 := fmt.Sprintf("ns1-%s", nameSuffix) + work := newManifestWork("", "", + newConfigmap(ns1, "cm1", nil, nil), + newConfigmap(ns1, "cm2", nil, nil), + newNamespace(ns1)) placementRef := workapiv1alpha1.LocalPlacementReference{Name: "placement-test"} manifestWorkReplicaSet := &workapiv1alpha1.ManifestWorkReplicaSet{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "mwrset-", - Namespace: "default", + Namespace: metav1.NamespaceDefault, }, Spec: workapiv1alpha1.ManifestWorkReplicaSetSpec{ ManifestWorkTemplate: work.Spec, PlacementRefs: []workapiv1alpha1.LocalPlacementReference{placementRef}, }, } - manifestWorkReplicaSet, err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets("default").Create(context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) + manifestWorkReplicaSet, err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Create(context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + placement := &clusterv1beta1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: placementRef.Name, + Namespace: metav1.NamespaceDefault, + }, + } + + placementDecision := &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: placement.Name, + Namespace: metav1.NamespaceDefault, + Labels: map[string]string{clusterv1beta1.PlacementLabel: placement.Name}, + }, + } + + placement, err = hubClusterClient.ClusterV1beta1().Placements(placement.Namespace).Create(context.TODO(), placement, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + placementDecision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Create(context.TODO(), placementDecision, metav1.CreateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + placementDecision.Status.Decisions = []clusterv1beta1.ClusterDecision{{ClusterName: clusterName}} + placementDecision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), placementDecision, metav1.UpdateOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + ginkgo.By("check if resources are applied for manifests") + gomega.Eventually(func() error { + _, err := spokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm1", metav1.GetOptions{}) + if err != nil { + return err + } + + _, err = spokeKubeClient.CoreV1().ConfigMaps(ns1).Get(context.Background(), "cm2", metav1.GetOptions{}) + if err != nil { + return err + } + + _, err = spokeKubeClient.CoreV1().Namespaces().Get(context.Background(), ns1, metav1.GetOptions{}) + return err + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + ginkgo.By("check if manifestworkreplicaset status") + gomega.Eventually(func() error { + mwrs, err := 
hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Get(context.TODO(), manifestWorkReplicaSet.Name, metav1.GetOptions{}) + if err != nil { + return err + } + + expectedSummary := workapiv1alpha1.ManifestWorkReplicaSetSummary{ + Total: 1, + Available: 1, + Applied: 1, + } + + if mwrs.Status.Summary != expectedSummary { + return fmt.Errorf("summary is not correct, expect %v, got %v", expectedSummary, mwrs.Status.Summary) + } + + if !meta.IsStatusConditionTrue(mwrs.Status.Conditions, workapiv1alpha1.ManifestWorkReplicaSetConditionManifestworkApplied) { + return fmt.Errorf("manifestwork replicaset condition is not correct") + } + + return nil + }, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred()) + + // TODO we should also update manifestwork replicaset and test + + err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(metav1.NamespaceDefault).Delete(context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) + gomega.Expect(err).NotTo(gomega.HaveOccurred()) + + err = hubClusterClient.ClusterV1beta1().Placements(placement.Namespace).Delete(context.TODO(), placement.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) - err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets("default").Delete(context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) + err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Delete(context.TODO(), placementDecision.Name, metav1.DeleteOptions{}) gomega.Expect(err).NotTo(gomega.HaveOccurred()) }) }) diff --git a/test/e2e/suite_test.go b/test/e2e/suite_test.go index 727f452e6..43b0f44d7 100644 --- a/test/e2e/suite_test.go +++ b/test/e2e/suite_test.go @@ -21,6 +21,7 @@ import ( "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" workclientset "open-cluster-management.io/api/client/work/clientset/versioned" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/log/zap" @@ -57,6 +58,7 @@ var ( spokeDynamicClient dynamic.Interface hubWorkClient workclientset.Interface spokeWorkClient workclientset.Interface + hubClusterClient clusterclientset.Interface agentDeployer workAgentDeployer ) @@ -91,6 +93,9 @@ var _ = ginkgo.BeforeSuite(func() { hubWorkClient, err = workclientset.NewForConfig(hubRestConfig) gomega.Expect(err).ToNot(gomega.HaveOccurred()) + hubClusterClient, err = clusterclientset.NewForConfig(hubRestConfig) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + // pick up managedKubeconfig from argument first, // and then fall back to hubKubeconfig if len(managedKubeconfig) == 0 { diff --git a/test/integration/manifestworkreplicaset_test.go b/test/integration/manifestworkreplicaset_test.go index 40c33b2de..350bc987d 100644 --- a/test/integration/manifestworkreplicaset_test.go +++ b/test/integration/manifestworkreplicaset_test.go @@ -2,11 +2,15 @@ package integration import ( "context" + "fmt" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilrand "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/sets" + clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" workapiv1 "open-cluster-management.io/api/work/v1" workapiv1alpha1 "open-cluster-management.io/api/work/v1alpha1" "open-cluster-management.io/work/test/integration/util" @@ -14,6 +18,9 
@@ import ( var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { var namespaceName string + var placement *clusterv1beta1.Placement + var placementDecision *clusterv1beta1.PlacementDecision + var generateTestFixture func(numberOfClusters int) (*workapiv1alpha1.ManifestWorkReplicaSet, sets.Set[string], error) ginkgo.BeforeEach(func() { namespaceName = utilrand.String(5) @@ -21,20 +28,28 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { ns.Name = namespaceName _, err := spokeKubeClient.CoreV1().Namespaces().Create(context.Background(), ns, metav1.CreateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) - ginkgo.AfterEach(func() { - err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), namespaceName, metav1.DeleteOptions{}) - gomega.Expect(err).ToNot(gomega.HaveOccurred()) - }) + placement = &clusterv1beta1.Placement{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement", + Namespace: namespaceName, + }, + } - // A sanity check ensuring crd is created correctly which should be refactored later - ginkgo.Context("Create a manifestWorkReplicaSet", func() { - ginkgo.It("should create successfully", func() { + placementDecision = &clusterv1beta1.PlacementDecision{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-placement-decision", + Namespace: namespaceName, + Labels: map[string]string{clusterv1beta1.PlacementLabel: placement.Name}, + }, + } + + generateTestFixture = func(numberOfClusters int) (*workapiv1alpha1.ManifestWorkReplicaSet, sets.Set[string], error) { + clusterNames := sets.New[string]() manifests := []workapiv1.Manifest{ util.ToManifest(util.NewConfigmap("defaut", "cm1", map[string]string{"a": "b"}, nil)), } - placementRef := workapiv1alpha1.LocalPlacementReference{Name: "placement-test"} + placementRef := workapiv1alpha1.LocalPlacementReference{Name: placement.Name} manifestWorkReplicaSet := &workapiv1alpha1.ManifestWorkReplicaSet{ ObjectMeta: metav1.ObjectMeta{ @@ -52,7 +67,158 @@ var _ = ginkgo.Describe("ManifestWorkReplicaSet", func() { } _, err := hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespaceName).Create(context.TODO(), manifestWorkReplicaSet, metav1.CreateOptions{}) + if err != nil { + return nil, clusterNames, err + } + + _, err = hubClusterClient.ClusterV1beta1().Placements(placement.Namespace).Create(context.TODO(), placement, metav1.CreateOptions{}) + if err != nil { + return nil, clusterNames, err + } + + decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Create(context.TODO(), placementDecision, metav1.CreateOptions{}) + if err != nil { + return nil, clusterNames, err + } + + for i := 0; i < numberOfClusters; i++ { + clusterName := "cluster-" + utilrand.String(5) + ns := &corev1.Namespace{} + ns.Name = clusterName + _, err = spokeKubeClient.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + if err != nil { + return nil, clusterNames, err + } + decision.Status.Decisions = append(decision.Status.Decisions, clusterv1beta1.ClusterDecision{ClusterName: clusterName}) + clusterNames.Insert(clusterName) + } + + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) + return manifestWorkReplicaSet, clusterNames, err + } + }) + + ginkgo.AfterEach(func() { + err := spokeKubeClient.CoreV1().Namespaces().Delete(context.Background(), namespaceName, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + }) + + // A sanity 
check ensuring crd is created correctly which should be refactored later + ginkgo.Context("Create and update a manifestWorkReplicaSet", func() { + ginkgo.It("should create/update/delete successfully", func() { + manifestWorkReplicaSet, clusterNames, err := generateTestFixture(3) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + gomega.Eventually(assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{ + Total: 3, + }, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + ginkgo.By("Update decision so manifestworks should be updated") + decision, err := hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).Get(context.TODO(), placementDecision.Name, metav1.GetOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + removedCluster := decision.Status.Decisions[2].ClusterName + decision.Status.Decisions = decision.Status.Decisions[:2] + decision, err = hubClusterClient.ClusterV1beta1().PlacementDecisions(placementDecision.Namespace).UpdateStatus(context.TODO(), decision, metav1.UpdateOptions{}) gomega.Expect(err).ToNot(gomega.HaveOccurred()) + clusterNames.Delete(removedCluster) + gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + gomega.Eventually(assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{ + Total: 2, + }, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + ginkgo.By("Delete manifestworkreplicaset") + err = hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(namespaceName).Delete(context.TODO(), manifestWorkReplicaSet.Name, metav1.DeleteOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + gomega.Eventually(assertWorksByReplicaSet(sets.New[string](), manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + }) + + ginkgo.It("status should update when manifestwork status change", func() { + manifestWorkReplicaSet, clusterNames, err := generateTestFixture(1) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + gomega.Eventually(assertWorksByReplicaSet(clusterNames, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + gomega.Eventually(assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{ + Total: 1, + }, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + key := fmt.Sprintf("%s.%s", manifestWorkReplicaSet.Namespace, manifestWorkReplicaSet.Name) + works, err := hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("work.open-cluster-management.io/manifestworkreplicaset=%s", key), + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + for _, work := range works.Items { + workCopy := work.DeepCopy() + meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkApplied, Status: metav1.ConditionTrue, Reason: "ApplyTest"}) + meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionTrue, Reason: "ApplyTest"}) + _, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + 
gomega.Eventually(assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{ + Total: 1, + Applied: 1, + Available: 1, + }, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) + + works, err = hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("work.open-cluster-management.io/manifestworkreplicaset=%s", key), + }) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + + for _, work := range works.Items { + workCopy := work.DeepCopy() + meta.SetStatusCondition(&workCopy.Status.Conditions, metav1.Condition{Type: workapiv1.WorkAvailable, Status: metav1.ConditionFalse, Reason: "ApplyTest"}) + _, err := hubWorkClient.WorkV1().ManifestWorks(workCopy.Namespace).UpdateStatus(context.TODO(), workCopy, metav1.UpdateOptions{}) + gomega.Expect(err).ToNot(gomega.HaveOccurred()) + } + + gomega.Eventually(assertSummary(workapiv1alpha1.ManifestWorkReplicaSetSummary{ + Total: 1, + Applied: 1, + Available: 0, + }, manifestWorkReplicaSet), eventuallyTimeout, eventuallyInterval).Should(gomega.Succeed()) }) }) }) + +func assertSummary(summary workapiv1alpha1.ManifestWorkReplicaSetSummary, mwrs *workapiv1alpha1.ManifestWorkReplicaSet) func() error { + return func() error { + rs, err := hubWorkClient.WorkV1alpha1().ManifestWorkReplicaSets(mwrs.Namespace).Get(context.TODO(), mwrs.Name, metav1.GetOptions{}) + + if err != nil { + return err + } + + if rs.Status.Summary != summary { + return fmt.Errorf("unexpected summary expected: %v, got :%v", summary, rs.Status.Summary) + } + + return nil + } +} + +func assertWorksByReplicaSet(clusterNames sets.Set[string], mwrs *workapiv1alpha1.ManifestWorkReplicaSet) func() error { + return func() error { + key := fmt.Sprintf("%s.%s", mwrs.Namespace, mwrs.Name) + works, err := hubWorkClient.WorkV1().ManifestWorks(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("work.open-cluster-management.io/manifestworkreplicaset=%s", key), + }) + if err != nil { + return err + } + + if len(works.Items) != clusterNames.Len() { + return fmt.Errorf("The number of applied works should equal to %d, but got %d", clusterNames.Len(), len(works.Items)) + } + + for _, work := range works.Items { + if !clusterNames.Has(work.Namespace) { + return fmt.Errorf("unexpected work %s/%s", work.Namespace, work.Name) + } + } + + return nil + } +} diff --git a/test/integration/suite_test.go b/test/integration/suite_test.go index 81d04cfb1..8ce4323f0 100644 --- a/test/integration/suite_test.go +++ b/test/integration/suite_test.go @@ -1,6 +1,10 @@ package integration import ( + "context" + "github.com/openshift/library-go/pkg/controller/controllercmd" + clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned" + "open-cluster-management.io/work/pkg/hub" "os" "path" "path/filepath" @@ -34,7 +38,10 @@ var testEnv *envtest.Environment var spokeKubeClient kubernetes.Interface var spokeWorkClient workclientset.Interface var hubWorkClient workclientset.Interface +var hubClusterClient clusterclientset.Interface var hubHash string +var envCtx context.Context +var envCancel context.CancelFunc func TestIntegration(t *testing.T) { gomega.RegisterFailHandler(ginkgo.Fail) @@ -53,7 +60,7 @@ var _ = ginkgo.BeforeSuite(func() { filepath.Join(".", "deploy", "hub"), }, } - + envCtx, envCancel = context.WithCancel(context.TODO()) cfg, err := testEnv.Start() gomega.Expect(err).ToNot(gomega.HaveOccurred()) gomega.Expect(cfg).ToNot(gomega.BeNil()) @@ -78,11 
+85,25 @@ var _ = ginkgo.BeforeSuite(func() {
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
 	spokeWorkClient, err = workclientset.NewForConfig(cfg)
 	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	hubClusterClient, err = clusterclientset.NewForConfig(cfg)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	// start hub controller
+	go func() {
+		err := hub.RunWorkHubManager(envCtx, &controllercmd.ControllerContext{
+			KubeConfig:    cfg,
+			EventRecorder: util.NewIntegrationTestEventRecorder("hub"),
+		})
+		gomega.Expect(err).NotTo(gomega.HaveOccurred())
+	}()
 })
 
 var _ = ginkgo.AfterSuite(func() {
 	ginkgo.By("tearing down the test environment")
+	envCancel()
+
 	err := testEnv.Stop()
 	gomega.Expect(err).ToNot(gomega.HaveOccurred())