diff --git a/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml b/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml
index 9f108009bd8..b91bf4677bb 100644
--- a/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml
+++ b/charts/yurt-manager/crds/apps.openyurt.io_nodepools.yaml
@@ -287,6 +287,195 @@ spec:
             type: object
         type: object
     served: true
+    storage: false
+    subresources:
+      status: {}
+  - additionalPrinterColumns:
+    - description: The type of nodepool
+      jsonPath: .spec.type
+      name: Type
+      type: string
+    - description: The number of ready nodes in the pool
+      jsonPath: .status.readyNodeNum
+      name: ReadyNodes
+      type: integer
+    - jsonPath: .status.unreadyNodeNum
+      name: NotReadyNodes
+      type: integer
+    - jsonPath: .metadata.creationTimestamp
+      name: Age
+      type: date
+    name: v1beta2
+    schema:
+      openAPIV3Schema:
+        description: NodePool is the Schema for the nodepools API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: NodePoolSpec defines the desired state of NodePool
+            properties:
+              annotations:
+                additionalProperties:
+                  type: string
+                description: |-
+                  If specified, the Annotations will be added to all nodes.
+                  NOTE: existing annotations with the same keys on the nodes will be overwritten.
+                type: object
+              hostNetwork:
+                description: |-
+                  HostNetwork is used to specify that CNI components (like flannel)
+                  will not be installed on the nodes of this NodePool.
+                  This means all pods on the nodes of this NodePool will use
+                  HostNetwork and share the network namespace with the host machine.
+                type: boolean
+              interConnectivity:
+                description: |-
+                  InterConnectivity indicates whether all nodes in the NodePool can access each other
+                  through a Layer 2 or Layer 3 network. If the field is true,
+                  nodepool-level list/watch request reuse can be applied for this nodepool,
+                  otherwise only node-level list/watch request reuse can be applied for the nodepool.
+                  This field cannot be changed after creation.
+                type: boolean
+              labels:
+                additionalProperties:
+                  type: string
+                description: |-
+                  If specified, the Labels will be added to all nodes.
+                  NOTE: existing labels with the same keys on the nodes will be overwritten.
+                type: object
+              leaderElectionStrategy:
+                description: |-
+                  LeaderElectionStrategy represents the policy for electing a leader Yurthub in a nodepool.
+                  random: select one ready node as leader at random.
+                  mark: select one ready node as leader from nodes that are specified by the label selector.
+                  More strategies will be supported according to users' new requirements.
+                type: string
+              leaderNodeLabelSelector:
+                additionalProperties:
+                  type: string
+                description: |-
+                  LeaderNodeLabelSelector is used only when LeaderElectionStrategy is mark. The leader Yurthub will be
+                  elected from nodes filtered by this label selector.
+                type: object
+              poolScopeMetadata:
+                description: |-
+                  PoolScopeMetadata is used for specifying resources which will be shared in the nodepool,
+                  and it can be modified dynamically. The default value is v1.Service and discovery.EndpointSlice.
+                items:
+                  description: |-
+                    GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion
+                    to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling
+                  properties:
+                    group:
+                      type: string
+                    kind:
+                      type: string
+                    version:
+                      type: string
+                  required:
+                  - group
+                  - kind
+                  - version
+                  type: object
+                type: array
+              taints:
+                description: If specified, the Taints will be added to all nodes.
+                items:
+                  description: |-
+                    The node this Taint is attached to has the "effect" on
+                    any pod that does not tolerate the Taint.
+                  properties:
+                    effect:
+                      description: |-
+                        Required. The effect of the taint on pods
+                        that do not tolerate the taint.
+                        Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+                      type: string
+                    key:
+                      description: Required. The taint key to be applied to a node.
+                      type: string
+                    timeAdded:
+                      description: |-
+                        TimeAdded represents the time at which the taint was added.
+                        It is only written for NoExecute taints.
+                      format: date-time
+                      type: string
+                    value:
+                      description: The taint value corresponding to the taint key.
+                      type: string
+                  required:
+                  - effect
+                  - key
+                  type: object
+                type: array
+              type:
+                description: The type of the NodePool
+                type: string
+            type: object
+          status:
+            description: NodePoolStatus defines the observed state of NodePool
+            properties:
+              conditions:
+                description: |-
+                  Conditions represents the latest available observations of a NodePool's
+                  current state, including the status of the leader Yurthub election.
+                items:
+                  properties:
+                    lastTransitionTime:
+                      description: Last time the condition transitioned from one status to another.
+                      format: date-time
+                      type: string
+                    message:
+                      description: A human readable message indicating details about the transition.
+                      type: string
+                    reason:
+                      description: The reason for the condition's last transition.
+                      type: string
+                    status:
+                      description: Status of the condition, one of True, False, Unknown.
+                      type: string
+                    type:
+                      description: Type of NodePool condition.
+                      type: string
+                  type: object
+                type: array
+              leaderEndpoints:
+                description: LeaderEndpoints is used for storing the addresses of the leader Yurthub.
+                items:
+                  type: string
+                type: array
+              nodes:
+                description: The list of nodes' names in the pool
+                items:
+                  type: string
+                type: array
+              readyNodeNum:
+                description: Total number of ready nodes in the pool.
+                format: int32
+                type: integer
+              unreadyNodeNum:
+                description: Total number of unready nodes in the pool.
+ format: int32 + type: integer + type: object + type: object + served: true storage: true subresources: status: {} diff --git a/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml b/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml index e0d5685314f..eab1ea4049e 100644 --- a/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml +++ b/charts/yurt-manager/templates/yurt-manager-auto-generated.yaml @@ -1500,19 +1500,19 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 + - v1beta2 clientConfig: service: name: yurt-manager-webhook-service namespace: {{ .Release.Namespace }} - path: /mutate-apps-openyurt-io-v1beta1-nodepool + path: /mutate-apps-openyurt-io-v1beta2-nodepool failurePolicy: Fail - name: m.v1beta1.nodepool.kb.io + name: m.v1beta2.nodepool.kb.io rules: - apiGroups: - apps.openyurt.io apiVersions: - - v1beta1 + - v1beta2 operations: - CREATE resources: @@ -1690,19 +1690,19 @@ webhooks: sideEffects: None - admissionReviewVersions: - v1 - - v1beta1 + - v1beta2 clientConfig: service: name: yurt-manager-webhook-service namespace: {{ .Release.Namespace }} - path: /validate-apps-openyurt-io-v1beta1-nodepool + path: /validate-apps-openyurt-io-v1beta2-nodepool failurePolicy: Fail - name: v.v1beta1.nodepool.kb.io + name: v.v1beta2.nodepool.kb.io rules: - apiGroups: - apps.openyurt.io apiVersions: - - v1beta1 + - v1beta2 operations: - CREATE - UPDATE diff --git a/pkg/apis/addtoscheme_apps_v1beta2.go b/pkg/apis/addtoscheme_apps_v1beta2.go new file mode 100644 index 00000000000..facf9ad1a8a --- /dev/null +++ b/pkg/apis/addtoscheme_apps_v1beta2.go @@ -0,0 +1,26 @@ +/* +Copyright 2023 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the License); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an AS IS BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package apis
+
+import (
+	version "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
+)
+
+func init() {
+	// Register the types with the Scheme so the components can map objects to GroupVersionKinds and back
+	AddToSchemes = append(AddToSchemes, version.SchemeBuilder.AddToScheme)
+}
diff --git a/pkg/apis/apps/v1alpha1/nodepool_conversion.go b/pkg/apis/apps/v1alpha1/nodepool_conversion.go
index 8ea0016996b..72cc66b3e8d 100644
--- a/pkg/apis/apps/v1alpha1/nodepool_conversion.go
+++ b/pkg/apis/apps/v1alpha1/nodepool_conversion.go
@@ -23,15 +23,15 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/conversion"
 
 	"github.com/openyurtio/openyurt/pkg/apis/apps"
-	"github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1"
+	"github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
 )
 
 func (src *NodePool) ConvertTo(dstRaw conversion.Hub) error {
-	dst := dstRaw.(*v1beta1.NodePool)
+	dst := dstRaw.(*v1beta2.NodePool)
 
 	dst.ObjectMeta = src.ObjectMeta
 
-	dst.Spec.Type = v1beta1.NodePoolType(src.Spec.Type)
+	dst.Spec.Type = v1beta2.NodePoolType(src.Spec.Type)
 	dst.Spec.Labels = src.Spec.Labels
 	dst.Spec.Annotations = src.Spec.Annotations
 	dst.Spec.Taints = src.Spec.Taints
@@ -43,13 +43,17 @@ func (src *NodePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst.Status.UnreadyNodeNum = src.Status.UnreadyNodeNum
 	dst.Status.Nodes = src.Status.Nodes
 
-	klog.V(4).Infof("convert from v1alpha1 to v1beta1 for nodepool %s", dst.Name)
+	// v1alpha1 has no InterConnectivity field, so disable leader election and nodepool-level list/watch reuse
+	dst.Spec.InterConnectivity = false
+	dst.Spec.LeaderElectionStrategy = string(v1beta2.ElectionStrategyRandom)
+
+	klog.V(4).Infof("convert from v1alpha1 to v1beta2 for nodepool %s", dst.Name)
 
 	return nil
 }
 
 func (dst *NodePool) ConvertFrom(srcRaw conversion.Hub) error {
-	src := srcRaw.(*v1beta1.NodePool)
+	src := srcRaw.(*v1beta2.NodePool)
 
 	dst.ObjectMeta = src.ObjectMeta
 
diff --git a/pkg/apis/apps/v1beta1/nodepool_conversion.go b/pkg/apis/apps/v1beta1/nodepool_conversion.go
index be5f0b3bf36..97c6d3fb42b 100644
--- a/pkg/apis/apps/v1beta1/nodepool_conversion.go
+++ b/pkg/apis/apps/v1beta1/nodepool_conversion.go
@@ -16,11 +16,52 @@ limitations under the License.
 
 package v1beta1
 
-/*
-Implementing the hub method is pretty easy -- we just have to add an empty
-method called `Hub()` to serve as a
-[marker](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/conversion?tab=doc#Hub).
-*/
+import (
+	"k8s.io/klog/v2"
+	"sigs.k8s.io/controller-runtime/pkg/conversion"
+
+	"github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
+)
+
+func (src *NodePool) ConvertTo(dstRaw conversion.Hub) error {
+	dst := dstRaw.(*v1beta2.NodePool)
+
+	dst.ObjectMeta = src.ObjectMeta
+
+	dst.Spec.Type = v1beta2.NodePoolType(src.Spec.Type)
+	dst.Spec.HostNetwork = src.Spec.HostNetwork
+	dst.Spec.Labels = src.Spec.Labels
+	dst.Spec.Annotations = src.Spec.Annotations
+	dst.Spec.Taints = src.Spec.Taints
+
+	dst.Status.ReadyNodeNum = src.Status.ReadyNodeNum
+	dst.Status.UnreadyNodeNum = src.Status.UnreadyNodeNum
+	dst.Status.Nodes = src.Status.Nodes
+
+	// v1beta1 has no InterConnectivity field, so disable leader election and nodepool-level list/watch reuse
+	dst.Spec.InterConnectivity = false
+	dst.Spec.LeaderElectionStrategy = string(v1beta2.ElectionStrategyRandom)
+
+	klog.V(4).Infof("convert from v1beta1 to v1beta2 for nodepool %s", dst.Name)
+
+	return nil
+}
+
+func (dst *NodePool) ConvertFrom(srcRaw conversion.Hub) error {
+	src := srcRaw.(*v1beta2.NodePool)
+
+	dst.ObjectMeta = src.ObjectMeta
+
+	dst.Spec.Type = NodePoolType(src.Spec.Type)
+	dst.Spec.HostNetwork = src.Spec.HostNetwork
+	dst.Spec.Labels = src.Spec.Labels
+	dst.Spec.Annotations = src.Spec.Annotations
+	dst.Spec.Taints = src.Spec.Taints
+
+	dst.Status.ReadyNodeNum = src.Status.ReadyNodeNum
+	dst.Status.UnreadyNodeNum = src.Status.UnreadyNodeNum
+	dst.Status.Nodes = src.Status.Nodes
 
-// Hub marks this type as a conversion hub.
-func (*NodePool) Hub() {}
+	klog.V(4).Infof("convert from v1beta2 to v1beta1 for nodepool %s", dst.Name)
+	return nil
+}
diff --git a/pkg/apis/apps/v1beta1/nodepool_types.go b/pkg/apis/apps/v1beta1/nodepool_types.go
index 34eaaa51c81..98816b85d17 100644
--- a/pkg/apis/apps/v1beta1/nodepool_types.go
+++ b/pkg/apis/apps/v1beta1/nodepool_types.go
@@ -79,7 +79,6 @@ type NodePoolStatus struct {
 // +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp"
 // +kubebuilder:subresource:status
 // +genclient:nonNamespaced
-// +kubebuilder:storageversion
 
 // NodePool is the Schema for the nodepools API
 type NodePool struct {
diff --git a/pkg/apis/apps/v1beta2/default.go b/pkg/apis/apps/v1beta2/default.go
new file mode 100644
index 00000000000..7d9b75cdc7b
--- /dev/null
+++ b/pkg/apis/apps/v1beta2/default.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2024 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+// SetDefaultsNodePool sets default values for NodePool.
+func SetDefaultsNodePool(obj *NodePool) {
+	// make sure the annotations map is initialized so that defaulting logic can write to it safely
+	if obj.Annotations == nil {
+		obj.Annotations = make(map[string]string)
+	}
+}
diff --git a/pkg/apis/apps/v1beta2/doc.go b/pkg/apis/apps/v1beta2/doc.go
new file mode 100644
index 00000000000..82bb85a822a
--- /dev/null
+++ b/pkg/apis/apps/v1beta2/doc.go
@@ -0,0 +1,17 @@
+/*
+Copyright 2024 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
diff --git a/pkg/apis/apps/v1beta2/groupversion_info.go b/pkg/apis/apps/v1beta2/groupversion_info.go
new file mode 100644
index 00000000000..5ace81d7609
--- /dev/null
+++ b/pkg/apis/apps/v1beta2/groupversion_info.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2024 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the License);
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an AS IS BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta2 contains API Schema definitions for the apps v1beta2 API group
+// +kubebuilder:object:generate=true
+// +groupName=apps.openyurt.io
+package v1beta2
+
+import (
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/scheme"
+)
+
+var (
+	// GroupVersion is the group version used to register these objects
+	GroupVersion = schema.GroupVersion{Group: "apps.openyurt.io", Version: "v1beta2"}
+
+	// SchemeGroupVersion is an alias of GroupVersion required by generated clientsets and listers
+	SchemeGroupVersion = GroupVersion
+
+	// SchemeBuilder is used to add go types to the GroupVersionKind scheme
+	SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion}
+
+	// AddToScheme adds the types in this group-version to the given scheme.
+	AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Resource is required by pkg/client/listers/...
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
diff --git a/pkg/apis/apps/v1beta2/nodepool_conversion.go b/pkg/apis/apps/v1beta2/nodepool_conversion.go
new file mode 100644
index 00000000000..34a152ebfc9
--- /dev/null
+++ b/pkg/apis/apps/v1beta2/nodepool_conversion.go
@@ -0,0 +1,26 @@
+/*
+Copyright 2024 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+/*
+Implementing the hub method is pretty easy -- we just have to add an empty
+method called `Hub()` to serve as a
+[marker](https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/conversion?tab=doc#Hub).
+*/
+
+// Hub marks this type as a conversion hub.
+func (*NodePool) Hub() {}
diff --git a/pkg/apis/apps/v1beta2/nodepool_types.go b/pkg/apis/apps/v1beta2/nodepool_types.go
new file mode 100644
index 00000000000..b722104d2fe
--- /dev/null
+++ b/pkg/apis/apps/v1beta2/nodepool_types.go
@@ -0,0 +1,164 @@
+/*
+Copyright 2024 The OpenYurt Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+type NodePoolType string
+
+// LeaderElectionStrategy represents the policy for electing a leader Yurthub in a nodepool.
+type LeaderElectionStrategy string
+
+const (
+	Edge  NodePoolType = "Edge"
+	Cloud NodePoolType = "Cloud"
+
+	ElectionStrategyMark   LeaderElectionStrategy = "mark"
+	ElectionStrategyRandom LeaderElectionStrategy = "random"
+
+	// LeaderStatus means the status of the leader Yurthub election.
+	// If it is Ready, a leader has been elected; otherwise, no leader is elected.
+	LeaderStatus NodePoolConditionType = "LeaderReady"
+)
+
+// NodePoolSpec defines the desired state of NodePool
+type NodePoolSpec struct {
+	// The type of the NodePool
+	// +optional
+	Type NodePoolType `json:"type,omitempty"`
+
+	// HostNetwork is used to specify that CNI components (like flannel)
+	// will not be installed on the nodes of this NodePool.
+	// This means all pods on the nodes of this NodePool will use
+	// HostNetwork and share the network namespace with the host machine.
+	HostNetwork bool `json:"hostNetwork,omitempty"`
+
+	// If specified, the Labels will be added to all nodes.
+	// NOTE: existing labels with the same keys on the nodes will be overwritten.
+	// +optional
+	Labels map[string]string `json:"labels,omitempty"`
+
+	// If specified, the Annotations will be added to all nodes.
+	// NOTE: existing annotations with the same keys on the nodes will be overwritten.
+	// +optional
+	Annotations map[string]string `json:"annotations,omitempty"`
+
+	// If specified, the Taints will be added to all nodes.
+	// +optional
+	Taints []v1.Taint `json:"taints,omitempty"`
+
+	// InterConnectivity indicates whether all nodes in the NodePool can access each other
+	// through a Layer 2 or Layer 3 network. If the field is true,
+	// nodepool-level list/watch request reuse can be applied for this nodepool,
+	// otherwise only node-level list/watch request reuse can be applied for the nodepool.
+	// This field cannot be changed after creation.
+	InterConnectivity bool `json:"interConnectivity,omitempty"`
+
+	// LeaderElectionStrategy represents the policy for electing a leader Yurthub in a nodepool.
+	// random: select one ready node as leader at random.
+	// mark: select one ready node as leader from nodes that are specified by the label selector.
+	// More strategies will be supported according to users' new requirements.
+	LeaderElectionStrategy string `json:"leaderElectionStrategy,omitempty"`
+
+	// LeaderNodeLabelSelector is used only when LeaderElectionStrategy is mark. The leader Yurthub will be
+	// elected from nodes filtered by this label selector.
+	LeaderNodeLabelSelector map[string]string `json:"leaderNodeLabelSelector,omitempty"`
+
+	// PoolScopeMetadata is used for specifying resources which will be shared in the nodepool,
+	// and it can be modified dynamically. The default value is v1.Service and discovery.EndpointSlice.
+	PoolScopeMetadata []metav1.GroupVersionKind `json:"poolScopeMetadata,omitempty"`
+}
+
+// NodePoolStatus defines the observed state of NodePool
+type NodePoolStatus struct {
+	// Total number of ready nodes in the pool.
+	// +optional
+	ReadyNodeNum int32 `json:"readyNodeNum"`
+
+	// Total number of unready nodes in the pool.
+	// +optional
+	UnreadyNodeNum int32 `json:"unreadyNodeNum"`
+
+	// The list of nodes' names in the pool
+	// +optional
+	Nodes []string `json:"nodes,omitempty"`
+
+	// LeaderEndpoints is used for storing the addresses of the leader Yurthub.
+	// +optional
+	LeaderEndpoints []string `json:"leaderEndpoints,omitempty"`
+
+	// Conditions represents the latest available observations of a NodePool's
+	// current state, including the status of the leader Yurthub election.
+	// +optional
+	Conditions []NodePoolCondition `json:"conditions,omitempty"`
+}
+
+// NodePoolConditionType represents a NodePool condition value.
+type NodePoolConditionType string
+
+type NodePoolCondition struct {
+	// Type of NodePool condition.
+	Type NodePoolConditionType `json:"type,omitempty"`
+
+	// Status of the condition, one of True, False, Unknown.
+	Status v1.ConditionStatus `json:"status,omitempty"`
+
+	// Last time the condition transitioned from one status to another.
+	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+
+	// The reason for the condition's last transition.
+	Reason string `json:"reason,omitempty"`
+
+	// A human readable message indicating details about the transition.
+ Message string `json:"message,omitempty"` +} + +// +genclient +// +kubebuilder:object:root=true +// +kubebuilder:resource:scope=Cluster,path=nodepools,shortName=np,categories=all +// +kubebuilder:printcolumn:name="Type",type="string",JSONPath=".spec.type",description="The type of nodepool" +// +kubebuilder:printcolumn:name="ReadyNodes",type="integer",JSONPath=".status.readyNodeNum",description="The number of ready nodes in the pool" +// +kubebuilder:printcolumn:name="NotReadyNodes",type="integer",JSONPath=".status.unreadyNodeNum" +// +kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp" +// +kubebuilder:subresource:status +// +genclient:nonNamespaced +// +kubebuilder:storageversion + +// NodePool is the Schema for the nodepools API +type NodePool struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec NodePoolSpec `json:"spec,omitempty"` + Status NodePoolStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// NodePoolList contains a list of NodePool +type NodePoolList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []NodePool `json:"items"` +} + +func init() { + SchemeBuilder.Register(&NodePool{}, &NodePoolList{}) +} diff --git a/pkg/apis/apps/v1beta2/zz_generated.deepcopy.go b/pkg/apis/apps/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 00000000000..b4b76e08dc8 --- /dev/null +++ b/pkg/apis/apps/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,182 @@ +//go:build !ignore_autogenerated + +/* +Copyright 2023 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePool) DeepCopyInto(out *NodePool) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePool. +func (in *NodePool) DeepCopy() *NodePool { + if in == nil { + return nil + } + out := new(NodePool) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodePool) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolCondition) DeepCopyInto(out *NodePoolCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolCondition. 
+func (in *NodePoolCondition) DeepCopy() *NodePoolCondition { + if in == nil { + return nil + } + out := new(NodePoolCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolList) DeepCopyInto(out *NodePoolList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodePool, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolList. +func (in *NodePoolList) DeepCopy() *NodePoolList { + if in == nil { + return nil + } + out := new(NodePoolList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodePoolList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolSpec) DeepCopyInto(out *NodePoolSpec) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]v1.Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.LeaderNodeLabelSelector != nil { + in, out := &in.LeaderNodeLabelSelector, &out.LeaderNodeLabelSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.PoolScopeMetadata != nil { + in, out := &in.PoolScopeMetadata, &out.PoolScopeMetadata + *out = make([]metav1.GroupVersionKind, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolSpec. +func (in *NodePoolSpec) DeepCopy() *NodePoolSpec { + if in == nil { + return nil + } + out := new(NodePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolStatus) DeepCopyInto(out *NodePoolStatus) { + *out = *in + if in.Nodes != nil { + in, out := &in.Nodes, &out.Nodes + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LeaderEndpoints != nil { + in, out := &in.LeaderEndpoints, &out.LeaderEndpoints + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodePoolCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolStatus. 
+func (in *NodePoolStatus) DeepCopy() *NodePoolStatus { + if in == nil { + return nil + } + out := new(NodePoolStatus) + in.DeepCopyInto(out) + return out +} diff --git a/pkg/util/kubernetes/kubeadm/app/util/apiclient/idempotency.go b/pkg/util/kubernetes/kubeadm/app/util/apiclient/idempotency.go index 3fd7b65403d..c6152fc13a9 100644 --- a/pkg/util/kubernetes/kubeadm/app/util/apiclient/idempotency.go +++ b/pkg/util/kubernetes/kubeadm/app/util/apiclient/idempotency.go @@ -33,7 +33,7 @@ import ( clientcmdapi "k8s.io/client-go/tools/clientcmd/api" clientsetretry "k8s.io/client-go/util/retry" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/util/kubernetes/kubeadm/app/constants" ) @@ -73,20 +73,26 @@ func CreateOrUpdateSecret(client clientset.Interface, secret *v1.Secret) error { // CreateOrUpdateRole creates a Role if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { var lastError error - err := wait.PollUntilContextTimeout(context.Background(), constants.APICallRetryInterval, constants.APICallWithWriteTimeout, true, func(ctx context.Context) (bool, error) { - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil { - if !apierrors.IsAlreadyExists(err) { - lastError = errors.Wrap(err, "unable to create RBAC role") - return false, nil + err := wait.PollUntilContextTimeout( + context.Background(), + constants.APICallRetryInterval, + constants.APICallWithWriteTimeout, + true, + func(ctx context.Context) (bool, error) { + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Create(context.TODO(), role, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + lastError = errors.Wrap(err, "unable to create RBAC role") + return false, nil + } + + if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{}); err != nil { + lastError = errors.Wrap(err, "unable to update RBAC role") + return false, nil + } } - - if _, err := client.RbacV1().Roles(role.ObjectMeta.Namespace).Update(context.TODO(), role, metav1.UpdateOptions{}); err != nil { - lastError = errors.Wrap(err, "unable to update RBAC role") - return false, nil - } - } - return true, nil - }) + return true, nil + }, + ) if err == nil { return nil } @@ -96,20 +102,26 @@ func CreateOrUpdateRole(client clientset.Interface, role *rbac.Role) error { // CreateOrUpdateRoleBinding creates a RoleBinding if the target resource doesn't exist. If the resource exists already, this function will update the resource instead. 
func CreateOrUpdateRoleBinding(client clientset.Interface, roleBinding *rbac.RoleBinding) error { var lastError error - err := wait.PollUntilContextTimeout(context.Background(), constants.APICallRetryInterval, constants.APICallWithWriteTimeout, true, func(ctx context.Context) (bool, error) { - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}); err != nil { - if !apierrors.IsAlreadyExists(err) { - lastError = errors.Wrap(err, "unable to create RBAC rolebinding") - return false, nil + err := wait.PollUntilContextTimeout( + context.Background(), + constants.APICallRetryInterval, + constants.APICallWithWriteTimeout, + true, + func(ctx context.Context) (bool, error) { + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Create(context.TODO(), roleBinding, metav1.CreateOptions{}); err != nil { + if !apierrors.IsAlreadyExists(err) { + lastError = errors.Wrap(err, "unable to create RBAC rolebinding") + return false, nil + } + + if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}); err != nil { + lastError = errors.Wrap(err, "unable to update RBAC rolebinding") + return false, nil + } } - - if _, err := client.RbacV1().RoleBindings(roleBinding.ObjectMeta.Namespace).Update(context.TODO(), roleBinding, metav1.UpdateOptions{}); err != nil { - lastError = errors.Wrap(err, "unable to update RBAC rolebinding") - return false, nil - } - } - return true, nil - }) + return true, nil + }, + ) if err == nil { return nil } @@ -138,8 +150,8 @@ func GetConfigMapWithRetry(client clientset.Interface, namespace, name string) ( return nil, lastError } -func GetNodePoolInfoWithRetry(cfg *clientcmdapi.Config, name string) (*v1beta1.NodePool, error) { - gvr := v1beta1.GroupVersion.WithResource("nodepools") +func GetNodePoolInfoWithRetry(cfg *clientcmdapi.Config, name string) (*v1beta2.NodePool, error) { + gvr := v1beta2.GroupVersion.WithResource("nodepools") clientConfig := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{}) restConfig, err := clientConfig.ClientConfig() @@ -166,7 +178,7 @@ func GetNodePoolInfoWithRetry(cfg *clientcmdapi.Config, name string) (*v1beta1.N return false, nil }) if err == nil { - np := new(v1beta1.NodePool) + np := new(v1beta2.NodePool) if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), np); err != nil { return nil, err } diff --git a/pkg/yurthub/filter/initializer/node_initializer_test.go b/pkg/yurthub/filter/initializer/node_initializer_test.go index 284b3ee5a7d..93ed041229d 100644 --- a/pkg/yurthub/filter/initializer/node_initializer_test.go +++ b/pkg/yurthub/filter/initializer/node_initializer_test.go @@ -30,7 +30,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis" "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/yurthub/filter" ) @@ -53,7 +53,11 @@ func (nop *nopNodeHandler) Filter(obj runtime.Object, stopCh <-chan struct{}) ru return obj } -func (nop *nopNodeHandler) SetNodesGetterAndSynced(nodesGetter filter.NodesInPoolGetter, nodesSynced cache.InformerSynced, enablePoolTopology bool) error { +func (nop *nopNodeHandler) SetNodesGetterAndSynced( + nodesGetter filter.NodesInPoolGetter, + nodesSynced cache.InformerSynced, + enablePoolTopology bool, +) error { nop.nodesGetter = 
nodesGetter nop.nodesSynced = nodesSynced nop.enablePoolTopology = enablePoolTopology @@ -83,14 +87,14 @@ func TestNodesInitializer(t *testing.T) { enablePoolServiceTopology: false, poolName: "hangzhou", yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node1", "node2", diff --git a/pkg/yurthub/filter/initializer/nodes_initializer.go b/pkg/yurthub/filter/initializer/nodes_initializer.go index fcb9435ccc3..06a9c868537 100644 --- a/pkg/yurthub/filter/initializer/nodes_initializer.go +++ b/pkg/yurthub/filter/initializer/nodes_initializer.go @@ -28,7 +28,7 @@ import ( "k8s.io/klog/v2" "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/yurthub/filter" ) @@ -49,7 +49,10 @@ type nodesInitializer struct { } // NewNodesInitializer creates an filterInitializer object -func NewNodesInitializer(enableNodePool, enablePoolServiceTopology bool, dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory) filter.Initializer { +func NewNodesInitializer( + enableNodePool, enablePoolServiceTopology bool, + dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, +) filter.Initializer { var nodesGetter filter.NodesInPoolGetter var nodesSynced cache.InformerSynced var enablePoolTopology bool @@ -76,7 +79,9 @@ func NewNodesInitializer(enableNodePool, enablePoolServiceTopology bool, dynamic } } -func createNodeGetterAndSyncedByNodeBucket(dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory) (filter.NodesInPoolGetter, cache.InformerSynced) { +func createNodeGetterAndSyncedByNodeBucket( + dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, +) (filter.NodesInPoolGetter, cache.InformerSynced) { gvr := v1alpha1.GroupVersion.WithResource("nodebuckets") nodesSynced := dynamicInformerFactory.ForResource(gvr).Informer().HasSynced lister := dynamicInformerFactory.ForResource(gvr).Lister() @@ -114,8 +119,10 @@ func createNodeGetterAndSyncedByNodeBucket(dynamicInformerFactory dynamicinforme return nodesGetter, nodesSynced } -func createNodeGetterAndSyncedByNodePool(dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory) (filter.NodesInPoolGetter, cache.InformerSynced) { - gvr := v1beta1.GroupVersion.WithResource("nodepools") +func createNodeGetterAndSyncedByNodePool( + dynamicInformerFactory dynamicinformer.DynamicSharedInformerFactory, +) (filter.NodesInPoolGetter, cache.InformerSynced) { + gvr := v1beta2.GroupVersion.WithResource("nodepools") nodesSynced := dynamicInformerFactory.ForResource(gvr).Informer().HasSynced lister := dynamicInformerFactory.ForResource(gvr).Lister() nodesGetter := func(poolName string) ([]string, error) { @@ -125,14 +132,14 @@ func createNodeGetterAndSyncedByNodePool(dynamicInformerFactory dynamicinformer. 
klog.Warningf("could not get nodepool %s, err: %v", poolName, err) return nodes, err } - var nodePool *v1beta1.NodePool + var nodePool *v1beta2.NodePool switch poolObj := runtimeObj.(type) { - case *v1beta1.NodePool: + case *v1beta2.NodePool: nodePool = poolObj case *unstructured.Unstructured: - nodePool = new(v1beta1.NodePool) + nodePool = new(v1beta2.NodePool) if err := runtime.DefaultUnstructuredConverter.FromUnstructured(poolObj.UnstructuredContent(), nodePool); err != nil { - klog.Warningf("object(%s) is not a v1beta1.NodePool, %v", poolObj.GetName(), err) + klog.Warningf("object(%s) is not a v1beta2.NodePool, %v", poolObj.GetName(), err) return nodes, err } default: diff --git a/pkg/yurthub/filter/responsefilter/filter_test.go b/pkg/yurthub/filter/responsefilter/filter_test.go index bcfa231d29c..be3e0f8c24a 100644 --- a/pkg/yurthub/filter/responsefilter/filter_test.go +++ b/pkg/yurthub/filter/responsefilter/filter_test.go @@ -45,7 +45,7 @@ import ( "github.com/openyurtio/openyurt/cmd/yurthub/app/options" "github.com/openyurtio/openyurt/pkg/apis" "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" "github.com/openyurtio/openyurt/pkg/yurthub/filter" "github.com/openyurtio/openyurt/pkg/yurthub/filter/base" @@ -1449,28 +1449,28 @@ func TestResponseFilterForListRequest(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node1", "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -2603,8 +2603,19 @@ func TestResponseFilterForListRequest(t *testing.T) { factory = informers.NewSharedInformerFactory(tc.kubeClient, 24*time.Hour) yurtFactory = dynamicinformer.NewDynamicSharedInformerFactory(tc.yurtClient, 24*time.Hour) - nodesInitializer = initializer.NewNodesInitializer(tc.enableNodePool, tc.enablePoolServiceTopology, yurtFactory) - genericInitializer = initializer.New(factory, tc.kubeClient, tc.nodeName, tc.poolName, tc.masterHost, tc.masterPort) + nodesInitializer = initializer.NewNodesInitializer( + tc.enableNodePool, + tc.enablePoolServiceTopology, + yurtFactory, + ) + genericInitializer = initializer.New( + factory, + tc.kubeClient, + tc.nodeName, + tc.poolName, + tc.masterHost, + tc.masterPort, + ) initializerChain := base.Initializers{} initializerChain = append(initializerChain, genericInitializer, nodesInitializer) diff --git a/pkg/yurthub/filter/servicetopology/filter_test.go b/pkg/yurthub/filter/servicetopology/filter_test.go index 956512b7f3d..4461113d1d5 100644 --- a/pkg/yurthub/filter/servicetopology/filter_test.go +++ b/pkg/yurthub/filter/servicetopology/filter_test.go @@ -35,7 +35,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis" "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" 
"github.com/openyurtio/openyurt/pkg/projectinfo" "github.com/openyurtio/openyurt/pkg/util" "github.com/openyurtio/openyurt/pkg/yurthub/filter/base" @@ -183,28 +183,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -321,28 +321,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -468,28 +468,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -612,28 +612,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -765,27 +765,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: 
v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -902,28 +902,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -1022,28 +1022,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, @@ -1139,28 +1139,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, @@ -1281,28 +1281,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -1408,28 +1408,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: 
fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -1541,28 +1541,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -1671,28 +1671,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -1808,27 +1808,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -1921,27 +1921,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: 
v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", @@ -2021,27 +2021,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", @@ -2124,28 +2124,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -2241,28 +2241,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -2362,28 +2362,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -2479,28 +2479,28 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: 
v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -2600,27 +2600,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node3", }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", }, @@ -2705,27 +2705,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", @@ -2798,27 +2798,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", @@ -2890,27 +2890,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", @@ -2982,27 +2982,27 @@ func TestFilter(t *testing.T) { }, ), yurtClient: 
fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ currentNodeName, }, }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }, - Status: v1beta1.NodePoolStatus{ + Status: v1beta2.NodePoolStatus{ Nodes: []string{ "node2", "node3", @@ -3346,7 +3346,11 @@ func TestFilter(t *testing.T) { factory.WaitForCacheSync(stopper) yurtFactory := dynamicinformer.NewDynamicSharedInformerFactory(tt.yurtClient, 24*time.Hour) - nodesInitializer := initializer.NewNodesInitializer(tt.enableNodePool, tt.enablePoolServiceTopology, yurtFactory) + nodesInitializer := initializer.NewNodesInitializer( + tt.enableNodePool, + tt.enablePoolServiceTopology, + yurtFactory, + ) nodesInitializer.Initialize(stf) stopper2 := make(chan struct{}) diff --git a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/event_handler.go b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/event_handler.go index 7d9d2dadb99..53569568684 100644 --- a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/event_handler.go +++ b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/event_handler.go @@ -29,7 +29,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/apis/network" "github.com/openyurtio/openyurt/pkg/apis/network/v1alpha1" ) @@ -83,7 +83,13 @@ func handlePoolServiceUpdate(oldObject, newObject client.Object, q workqueue.Rat newServiceName := getServiceNameFromPoolService(newPs) if oldServiceName != newServiceName { - klog.Warningf("service name of %s/%s is changed from %s to %s", oldPs.Namespace, oldPs.Name, oldServiceName, newServiceName) + klog.Warningf( + "service name of %s/%s is changed from %s to %s", + oldPs.Namespace, + oldPs.Name, + oldServiceName, + newServiceName, + ) enqueueService(oldPs.Namespace, oldServiceName, q) enqueueService(newPs.Namespace, newServiceName, q) return @@ -126,7 +132,7 @@ func allLoadBalancerSetServicesEnqueue(c client.Client, q workqueue.RateLimiting } func nodePoolRelatedServiceEnqueue(c client.Client, object client.Object, q workqueue.RateLimitingInterface) { - np := object.(*v1beta1.NodePool) + np := object.(*v1beta2.NodePool) poolServiceList := &v1alpha1.PoolServiceList{} listSelector := client.MatchingLabels{ diff --git a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller.go b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller.go index c5707ce76e6..e5024564046 100644 --- a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller.go +++ b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller.go @@ -42,6 +42,7 @@ import ( appconfig "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" 
"github.com/openyurtio/openyurt/pkg/apis/network" netv1alpha1 "github.com/openyurtio/openyurt/pkg/apis/network/v1alpha1" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/config" @@ -114,17 +115,38 @@ func add(mgr manager.Manager, cfg *appconfig.CompletedConfig, r reconcile.Reconc } // Watch for changes to PoolService - err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &corev1.Service{}, &handler.EnqueueRequestForObject{}, NewServicePredicated())) + err = c.Watch( + source.Kind[client.Object]( + mgr.GetCache(), + &corev1.Service{}, + &handler.EnqueueRequestForObject{}, + NewServicePredicated(), + ), + ) if err != nil { return err } - err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &netv1alpha1.PoolService{}, NewPoolServiceEventHandler(), NewPoolServicePredicated())) + err = c.Watch( + source.Kind[client.Object]( + mgr.GetCache(), + &netv1alpha1.PoolService{}, + NewPoolServiceEventHandler(), + NewPoolServicePredicated(), + ), + ) if err != nil { return err } - err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &v1beta1.NodePool{}, NewNodePoolEventHandler(yurtClient.GetClientByControllerNameOrDie(mgr, names.LoadBalancerSetController)), NewNodePoolPredicated())) + err = c.Watch( + source.Kind[client.Object]( + mgr.GetCache(), + &v1beta2.NodePool{}, + NewNodePoolEventHandler(yurtClient.GetClientByControllerNameOrDie(mgr, names.LoadBalancerSetController)), + NewNodePoolPredicated(), + ), + ) if err != nil { return err } @@ -299,7 +321,11 @@ func (r *ReconcileLoadBalancerSet) syncPoolServices(svc *corev1.Service) error { func (r *ReconcileLoadBalancerSet) desiredPoolServices(svc *corev1.Service) ([]netv1alpha1.PoolService, error) { if !isLoadBalancerSetService(svc) { - klog.Warningf("service %s/%s is not multi regional service, set desire pool services is nil", svc.Namespace, svc.Name) + klog.Warningf( + "service %s/%s is not multi regional service, set desire pool services is nil", + svc.Namespace, + svc.Name, + ) return nil, nil } @@ -309,7 +335,12 @@ func (r *ReconcileLoadBalancerSet) desiredPoolServices(svc *corev1.Service) ([]n } if len(nps) == 0 { - r.recorder.Eventf(svc, corev1.EventTypeWarning, "NoMatchNodePool", "No node pool matches the nodepool label selector on the service") + r.recorder.Eventf( + svc, + corev1.EventTypeWarning, + "NoMatchNodePool", + "No node pool matches the nodepool label selector on the service", + ) } var pss []netv1alpha1.PoolService @@ -319,14 +350,14 @@ func (r *ReconcileLoadBalancerSet) desiredPoolServices(svc *corev1.Service) ([]n return pss, nil } -func (r *ReconcileLoadBalancerSet) listNodePoolsByLabelSelector(svc *corev1.Service) ([]v1beta1.NodePool, error) { +func (r *ReconcileLoadBalancerSet) listNodePoolsByLabelSelector(svc *corev1.Service) ([]v1beta2.NodePool, error) { labelStr := svc.Annotations[network.AnnotationNodePoolSelector] labelSelector, err := labels.Parse(labelStr) if err != nil { return nil, err } - npList := &v1beta1.NodePoolList{} + npList := &v1beta2.NodePoolList{} if err := r.List(context.Background(), npList, &client.ListOptions{LabelSelector: labelSelector}); err != nil { return nil, err } @@ -334,8 +365,8 @@ func (r *ReconcileLoadBalancerSet) listNodePoolsByLabelSelector(svc *corev1.Serv return filterDeletionNodePools(npList.Items), nil } -func filterDeletionNodePools(allItems []v1beta1.NodePool) []v1beta1.NodePool { - var filterItems []v1beta1.NodePool +func filterDeletionNodePools(allItems []v1beta2.NodePool) []v1beta2.NodePool { + var filterItems []v1beta2.NodePool 
for _, item := range allItems { if !item.DeletionTimestamp.IsZero() { @@ -347,7 +378,7 @@ func filterDeletionNodePools(allItems []v1beta1.NodePool) []v1beta1.NodePool { return filterItems } -func buildPoolService(svc *corev1.Service, np *v1beta1.NodePool) netv1alpha1.PoolService { +func buildPoolService(svc *corev1.Service, np *v1beta2.NodePool) netv1alpha1.PoolService { isController, isBlockOwnerDeletion := true, true return netv1alpha1.PoolService{ TypeMeta: v1.TypeMeta{ @@ -357,7 +388,11 @@ func buildPoolService(svc *corev1.Service, np *v1beta1.NodePool) netv1alpha1.Poo ObjectMeta: v1.ObjectMeta{ Namespace: svc.Namespace, Name: svc.Name + "-" + np.Name, - Labels: map[string]string{network.LabelServiceName: svc.Name, network.LabelNodePoolName: np.Name, labelManageBy: names.LoadBalancerSetController}, + Labels: map[string]string{ + network.LabelServiceName: svc.Name, + network.LabelNodePoolName: np.Name, + labelManageBy: names.LoadBalancerSetController, + }, OwnerReferences: []v1.OwnerReference{ { APIVersion: svc.APIVersion, @@ -382,7 +417,9 @@ func buildPoolService(svc *corev1.Service, np *v1beta1.NodePool) netv1alpha1.Poo } } -func (r *ReconcileLoadBalancerSet) diffPoolServices(desirePoolServices, currentPoolServices []netv1alpha1.PoolService) (applications []netv1alpha1.PoolService, deletions []netv1alpha1.PoolService) { +func (r *ReconcileLoadBalancerSet) diffPoolServices( + desirePoolServices, currentPoolServices []netv1alpha1.PoolService, +) (applications []netv1alpha1.PoolService, deletions []netv1alpha1.PoolService) { for _, dps := range desirePoolServices { if exist := r.isPoolServicePresent(currentPoolServices, dps); !exist { applications = append(applications, dps) @@ -398,7 +435,10 @@ func (r *ReconcileLoadBalancerSet) diffPoolServices(desirePoolServices, currentP return } -func (r *ReconcileLoadBalancerSet) isPoolServicePresent(poolServices []netv1alpha1.PoolService, ps netv1alpha1.PoolService) bool { +func (r *ReconcileLoadBalancerSet) isPoolServicePresent( + poolServices []netv1alpha1.PoolService, + ps netv1alpha1.PoolService, +) bool { for _, dps := range poolServices { if dps.Name == ps.Name { return true @@ -424,7 +464,12 @@ func (r *ReconcileLoadBalancerSet) applyPoolService(poolService *netv1alpha1.Poo if exist { if err := r.compareAndUpdatePoolService(currentPoolService, poolService); err != nil { - return errors.Wrapf(err, "failed to compare and update pool service %s/%s", poolService.Namespace, poolService.Name) + return errors.Wrapf( + err, + "failed to compare and update pool service %s/%s", + poolService.Namespace, + poolService.Name, + ) } return nil } @@ -445,10 +490,20 @@ func (r *ReconcileLoadBalancerSet) tryGetPoolService(namespace, name string) (*n return currentPs, true, err } -func (r *ReconcileLoadBalancerSet) compareAndUpdatePoolService(currentPoolService, desirePoolService *netv1alpha1.PoolService) error { +func (r *ReconcileLoadBalancerSet) compareAndUpdatePoolService( + currentPoolService, desirePoolService *netv1alpha1.PoolService, +) error { if currentPoolService.Labels[labelManageBy] != names.LoadBalancerSetController { - r.recorder.Eventf(currentPoolService, corev1.EventTypeWarning, "ManagedConflict", poolServiceManagedConflictEventMsgFormat, - currentPoolService.Namespace, currentPoolService.Name, currentPoolService.Namespace, desirePoolService.Labels[network.LabelServiceName]) + r.recorder.Eventf( + currentPoolService, + corev1.EventTypeWarning, + "ManagedConflict", + poolServiceManagedConflictEventMsgFormat, + currentPoolService.Namespace, + 
currentPoolService.Name, + currentPoolService.Namespace, + desirePoolService.Labels[network.LabelServiceName], + ) return nil } @@ -459,7 +514,14 @@ func (r *ReconcileLoadBalancerSet) compareAndUpdatePoolService(currentPoolServic return nil } - r.recorder.Eventf(currentPoolService, corev1.EventTypeWarning, "Modified", poolServiceModifiedEventMsgFormat, currentPoolService.Namespace, currentPoolService.Name) + r.recorder.Eventf( + currentPoolService, + corev1.EventTypeWarning, + "Modified", + poolServiceModifiedEventMsgFormat, + currentPoolService.Namespace, + currentPoolService.Name, + ) if err := r.Update(context.Background(), currentPoolService); err != nil { return errors.Wrapf(err, "failed to update pool service") } @@ -467,7 +529,10 @@ func (r *ReconcileLoadBalancerSet) compareAndUpdatePoolService(currentPoolServic return nil } -func compareAndUpdatePoolServiceLabel(currentPoolService *netv1alpha1.PoolService, desireLabels map[string]string) bool { +func compareAndUpdatePoolServiceLabel( + currentPoolService *netv1alpha1.PoolService, + desireLabels map[string]string, +) bool { isUpdate := false if currentPoolService.Labels[network.LabelServiceName] != desireLabels[network.LabelServiceName] { currentPoolService.Labels[network.LabelServiceName] = desireLabels[network.LabelServiceName] @@ -482,7 +547,10 @@ func compareAndUpdatePoolServiceLabel(currentPoolService *netv1alpha1.PoolServic return isUpdate } -func compareAndUpdatePoolServiceOwners(currentPoolService *netv1alpha1.PoolService, desireOwners []v1.OwnerReference) bool { +func compareAndUpdatePoolServiceOwners( + currentPoolService *netv1alpha1.PoolService, + desireOwners []v1.OwnerReference, +) bool { if !reflect.DeepEqual(currentPoolService.OwnerReferences, desireOwners) { currentPoolService.OwnerReferences = desireOwners return true @@ -503,7 +571,11 @@ func (r *ReconcileLoadBalancerSet) syncService(svc *corev1.Service) error { return r.compareAndUpdateService(svc, aggregatedLabels, aggregatedAnnotations, aggregatedLbStatus) } -func (r *ReconcileLoadBalancerSet) compareAndUpdateService(svc *corev1.Service, labels, annotations map[string]string, lbStatus corev1.LoadBalancerStatus) error { +func (r *ReconcileLoadBalancerSet) compareAndUpdateService( + svc *corev1.Service, + labels, annotations map[string]string, + lbStatus corev1.LoadBalancerStatus, +) error { isUpdatedLbStatus := compareAndUpdateServiceLbStatus(svc, lbStatus) if isUpdatedLbStatus { return r.Status().Update(context.Background(), svc) diff --git a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller_test.go b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller_test.go index 0ca57241b62..cd00aac1c41 100644 --- a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller_test.go +++ b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/load_balancer_set_controller_test.go @@ -38,7 +38,7 @@ import ( "github.com/openyurtio/openyurt/cmd/yurt-manager/names" "github.com/openyurtio/openyurt/pkg/apis" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/apis/network" "github.com/openyurtio/openyurt/pkg/apis/network/v1alpha1" ) @@ -72,7 +72,13 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { np1 := newNodepool("np123", "name=np123,app=deploy") np2 := newNodepool("np234", "name=np234,app=deploy") np3 := newNodepool("np345", "name=np345") - c := 
fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(svc).WithObjects(np1).WithObjects(np3).WithObjects(np2).Build() + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(svc). + WithObjects(np1). + WithObjects(np3). + WithObjects(np2). + Build() rc := ReconcileLoadBalancerSet{ Client: c, } @@ -132,7 +138,14 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { ps1 := newPoolService(v1.NamespaceDefault, "np123", nil, nil, nil) ps2 := newPoolService(v1.NamespaceDefault, "np234", nil, nil, nil) - c := fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(svc).WithObjects(np1).WithObjects(np2).WithObjects(ps1).WithObjects(ps2).Build() + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(svc). + WithObjects(np1). + WithObjects(np2). + WithObjects(ps1). + WithObjects(ps2). + Build() rc := ReconcileLoadBalancerSet{ Client: c, @@ -157,7 +170,15 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { ps2 := newPoolService(v1.NamespaceDefault, "np234", nil, nil, []corev1.LoadBalancerIngress{{IP: "1.2.3.4"}}) ps3 := newPoolService(v1.NamespaceSystem, "np234", nil, nil, []corev1.LoadBalancerIngress{{IP: "3.4.5.6"}}) - c := fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(svc).WithObjects(np1).WithObjects(np2).WithObjects(ps1).WithObjects(ps2).WithObjects(ps3).Build() + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(svc). + WithObjects(np1). + WithObjects(np2). + WithObjects(ps1). + WithObjects(ps2). + WithObjects(ps3). + Build() rc := ReconcileLoadBalancerSet{ Client: c, @@ -182,10 +203,34 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { np1 := newNodepool("np123", "name=np123,app=deploy") np2 := newNodepool("np234", "name=np234,app=deploy") - ps1 := newPoolService(v1.NamespaceDefault, "np123", map[string]string{"lb-id": "lb34567"}, map[string]string{"lb-id": "lb34567"}, nil) - ps2 := newPoolService(v1.NamespaceDefault, "np234", map[string]string{"lb-id": "lb23456"}, map[string]string{"lb-id": "lb23456"}, nil) - ps3 := newPoolService(v1.NamespaceDefault, "np345", map[string]string{"lb-id": "lb12345"}, map[string]string{"lb-id": "lb12345"}, nil) - ps4 := newPoolService(v1.NamespaceDefault, "np456", map[string]string{"lb-id": "lb12345"}, map[string]string{"lb-id": "lb12345"}, nil) + ps1 := newPoolService( + v1.NamespaceDefault, + "np123", + map[string]string{"lb-id": "lb34567"}, + map[string]string{"lb-id": "lb34567"}, + nil, + ) + ps2 := newPoolService( + v1.NamespaceDefault, + "np234", + map[string]string{"lb-id": "lb23456"}, + map[string]string{"lb-id": "lb23456"}, + nil, + ) + ps3 := newPoolService( + v1.NamespaceDefault, + "np345", + map[string]string{"lb-id": "lb12345"}, + map[string]string{"lb-id": "lb12345"}, + nil, + ) + ps4 := newPoolService( + v1.NamespaceDefault, + "np456", + map[string]string{"lb-id": "lb12345"}, + map[string]string{"lb-id": "lb12345"}, + nil, + ) c := fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(svc).WithObjects(np1).WithObjects(np2). 
WithObjects(ps1).WithObjects(ps2).WithObjects(ps3).WithObjects(ps4).Build() @@ -356,7 +401,13 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { assertErrNil(t, err) eve := <-recorder.Events - expected := fmt.Sprintf("%s %s %s%s", corev1.EventTypeWarning, "NoMatchNodePool", "No node pool matches the nodepool label selector on the service", "") + expected := fmt.Sprintf( + "%s %s %s%s", + corev1.EventTypeWarning, + "NoMatchNodePool", + "No node pool matches the nodepool label selector on the service", + "", + ) assertString(t, expected, eve) }) @@ -654,7 +705,13 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { assertOwnerReferences(t, expectedOwnerReferences, newPs.OwnerReferences) eve := <-recorder.Events - expected := fmt.Sprintf("%s %s %s%s", corev1.EventTypeWarning, "Modified", "PoolService default/test-np123 resource is manually modified, the controller will overwrite this modification", "") + expected := fmt.Sprintf( + "%s %s %s%s", + corev1.EventTypeWarning, + "Modified", + "PoolService default/test-np123 resource is manually modified, the controller will overwrite this modification", + "", + ) assertString(t, expected, eve) }) @@ -669,7 +726,13 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { np := newNodepool("np123", "name=np123,app=deploy") - c := fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(svc1).WithObjects(svc2).WithObjects(ps).WithObjects(np).Build() + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(svc1). + WithObjects(svc2). + WithObjects(ps). + WithObjects(np). + Build() recorder := &record.FakeRecorder{ Events: make(chan string, 1), } @@ -748,7 +811,13 @@ func TestReconcilePoolService_Reconcile(t *testing.T) { assertString(t, newPs.Labels[network.LabelServiceName], "mock") eve := <-recorder.Events - expected := fmt.Sprintf("%s %s %s%s", corev1.EventTypeWarning, "ManagedConflict", "PoolService default/test-np123 is not managed by pool-service-controller, but the nodepool-labelselector of service default/test include it", "") + expected := fmt.Sprintf( + "%s %s %s%s", + corev1.EventTypeWarning, + "ManagedConflict", + "PoolService default/test-np123 is not managed by pool-service-controller, but the nodepool-labelselector of service default/test include it", + "", + ) assertString(t, expected, eve) }) } @@ -800,7 +869,7 @@ func newService(namespace string, name string) *corev1.Service { } } -func newNodepool(name string, labelStr string) *v1beta1.NodePool { +func newNodepool(name string, labelStr string) *v1beta2.NodePool { var splitLabels []string if labelStr != "" { splitLabels = strings.Split(labelStr, ",") @@ -812,7 +881,7 @@ labels[kv[0]] = kv[1] } - return &v1beta1.NodePool{ + return &v1beta2.NodePool{ TypeMeta: v1.TypeMeta{ Kind: "NodePool", - APIVersion: "apps.openyurt.io/v1beta1", + APIVersion: "apps.openyurt.io/v1beta2", @@ -877,7 +946,12 @@ func assertPoolServiceLabels(t testing.TB, psl *v1alpha1.PoolServiceList, servic } } -func newPoolService(namespace string, poolName string, aggregatedLabels, aggregatedAnnotations map[string]string, lbIngress []corev1.LoadBalancerIngress) *v1alpha1.PoolService { +func newPoolService( + namespace string, + poolName string, + aggregatedLabels, aggregatedAnnotations map[string]string, + lbIngress []corev1.LoadBalancerIngress, +) *v1alpha1.PoolService { blockOwnerDeletion := true controller := true return &v1alpha1.PoolService{ @@ -888,7 +962,11 @@ ObjectMeta: 
v1.ObjectMeta{ Namespace: namespace, Name: mockServiceName + "-" + poolName, - Labels: map[string]string{network.LabelServiceName: mockServiceName, network.LabelNodePoolName: poolName, labelManageBy: names.LoadBalancerSetController}, + Labels: map[string]string{ + network.LabelServiceName: mockServiceName, + network.LabelNodePoolName: poolName, + labelManageBy: names.LoadBalancerSetController, + }, OwnerReferences: []v1.OwnerReference{ { APIVersion: "v1", diff --git a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/predicate.go b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/predicate.go index fa7ffde0478..a64bc32f8fb 100644 --- a/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/predicate.go +++ b/pkg/yurtmanager/controller/loadbalancerset/loadbalancerset/predicate.go @@ -24,7 +24,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/predicate" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/apis/network" "github.com/openyurtio/openyurt/pkg/apis/network/v1alpha1" ) @@ -215,12 +215,12 @@ func managedByController(ps *v1alpha1.PoolService) bool { func NewNodePoolPredicated() predicate.Predicate { return predicate.Funcs{ UpdateFunc: func(updateEvent event.UpdateEvent) bool { - oldNp, ok := updateEvent.ObjectOld.(*v1beta1.NodePool) + oldNp, ok := updateEvent.ObjectOld.(*v1beta2.NodePool) if !ok { return false } - newNp, ok := updateEvent.ObjectNew.(*v1beta1.NodePool) + newNp, ok := updateEvent.ObjectNew.(*v1beta2.NodePool) if !ok { return false } @@ -228,21 +228,21 @@ func NewNodePoolPredicated() predicate.Predicate { return isNodePoolChange(oldNp, newNp) }, CreateFunc: func(createEvent event.CreateEvent) bool { - np, ok := createEvent.Object.(*v1beta1.NodePool) + np, ok := createEvent.Object.(*v1beta2.NodePool) if !ok { return false } return nodePoolHasLabels(np) }, DeleteFunc: func(deleteEvent event.DeleteEvent) bool { - np, ok := deleteEvent.Object.(*v1beta1.NodePool) + np, ok := deleteEvent.Object.(*v1beta2.NodePool) if !ok { return false } return nodePoolHasLabels(np) }, GenericFunc: func(genericEvent event.GenericEvent) bool { - np, ok := genericEvent.Object.(*v1beta1.NodePool) + np, ok := genericEvent.Object.(*v1beta2.NodePool) if !ok { return false } @@ -251,13 +251,13 @@ func NewNodePoolPredicated() predicate.Predicate { } } -func isNodePoolChange(oldNp, newNp *v1beta1.NodePool) bool { +func isNodePoolChange(oldNp, newNp *v1beta2.NodePool) bool { if !reflect.DeepEqual(oldNp.Labels, newNp.Labels) { return true } return false } -func nodePoolHasLabels(np *v1beta1.NodePool) bool { +func nodePoolHasLabels(np *v1beta2.NodePool) bool { return len(np.Labels) != 0 } diff --git a/pkg/yurtmanager/controller/nodebucket/node_bucket_controller.go b/pkg/yurtmanager/controller/nodebucket/node_bucket_controller.go index a9e6b961632..09fe3624e2c 100644 --- a/pkg/yurtmanager/controller/nodebucket/node_bucket_controller.go +++ b/pkg/yurtmanager/controller/nodebucket/node_bucket_controller.go @@ -41,7 +41,7 @@ import ( appconfig "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" appsv1alpha1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) @@ -83,12 +83,12 @@ func 
Add(_ context.Context, cfg *appconfig.CompletedConfig, mgr manager.Manager) // Watch for changes to NodeBucket if err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &appsv1alpha1.NodeBucket{}, - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1beta1.NodePool{}, handler.OnlyControllerOwner()))); err != nil { + handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &appsv1beta2.NodePool{}, handler.OnlyControllerOwner()))); err != nil { return err } // Watch nodepool create for nodebucket - if err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &appsv1beta1.NodePool{}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ + if err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &appsv1beta2.NodePool{}, &handler.EnqueueRequestForObject{}, predicate.Funcs{ CreateFunc: func(createEvent event.CreateEvent) bool { return true }, @@ -132,20 +132,22 @@ func Add(_ context.Context, cfg *appconfig.CompletedConfig, mgr manager.Manager) }, } - reconcilePool := handler.EnqueueRequestsFromMapFunc(func(ctx context.Context, obj client.Object) []reconcile.Request { - node, ok := obj.(*v1.Node) - if !ok { - return []reconcile.Request{} - } - if npName := node.Labels[projectinfo.GetNodePoolLabel()]; len(npName) != 0 { - return []reconcile.Request{ - { - NamespacedName: types.NamespacedName{Name: npName}, - }, + reconcilePool := handler.EnqueueRequestsFromMapFunc( + func(ctx context.Context, obj client.Object) []reconcile.Request { + node, ok := obj.(*v1.Node) + if !ok { + return []reconcile.Request{} } - } - return []reconcile.Request{} - }) + if npName := node.Labels[projectinfo.GetNodePoolLabel()]; len(npName) != 0 { + return []reconcile.Request{ + { + NamespacedName: types.NamespacedName{Name: npName}, + }, + } + } + return []reconcile.Request{} + }, + ) // Watch for changes to Node if err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &v1.Node{}, reconcilePool, nodePredicate)); err != nil { @@ -171,7 +173,7 @@ func (r *ReconcileNodeBucket) Reconcile(ctx context.Context, request reconcile.R klog.Info(Format("Reconcile NodePool for NodeBuckets %s/%s", request.Namespace, request.Name)) // 1. Fetch the NodePool instance - ins := &appsv1beta1.NodePool{} + ins := &appsv1beta2.NodePool{} err := r.Get(context.TODO(), request.NamespacedName, ins) if err != nil { return reconcile.Result{}, client.IgnoreNotFound(err) @@ -202,8 +204,19 @@ func (r *ReconcileNodeBucket) Reconcile(ctx context.Context, request reconcile.R } // 4. 
reconcile NodeBuckets based on nodes and existing NodeBuckets - bucketsToCreate, bucketsToUpdate, bucketsToDelete, bucketsUnchanged := r.reconcileNodeBuckets(ins, desiredNodeSet, &existingNodeBucketList) - klog.Infof("reconcile pool(%s): bucketsToCreate=%d, bucketsToUpdate=%d, bucketsToDelete=%d, bucketsUnchanged=%d", ins.Name, len(bucketsToCreate), len(bucketsToUpdate), len(bucketsToDelete), len(bucketsUnchanged)) + bucketsToCreate, bucketsToUpdate, bucketsToDelete, bucketsUnchanged := r.reconcileNodeBuckets( + ins, + desiredNodeSet, + &existingNodeBucketList, + ) + klog.Infof( + "reconcile pool(%s): bucketsToCreate=%d, bucketsToUpdate=%d, bucketsToDelete=%d, bucketsUnchanged=%d", + ins.Name, + len(bucketsToCreate), + len(bucketsToUpdate), + len(bucketsToDelete), + len(bucketsUnchanged), + ) // 5. finalize creates, updates, and deletes buckets as specified if err = finalize(ctx, r.Client, bucketsToCreate, bucketsToUpdate, bucketsToDelete); err != nil { @@ -215,13 +228,17 @@ } func (r *ReconcileNodeBucket) reconcileNodeBuckets( - pool *appsv1beta1.NodePool, + pool *appsv1beta2.NodePool, desiredNodeSet sets.Set[string], buckets *appsv1alpha1.NodeBucketList, ) ([]*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket) { - bucketsUnchanged, bucketsToUpdate, bucketsToDelete, unFilledNodeSet := resolveExistingBuckets(buckets, desiredNodeSet) - klog.V(4).Infof("reconcileNodeBuckets for pool(%s), len(bucketsUnchanged)=%d, len(bucketsToUpdate)=%d, len(bucketsToDelete)=%d, unFilledNodeSet=%v", - pool.Name, len(bucketsUnchanged), len(bucketsToUpdate), len(bucketsToDelete), unFilledNodeSet.UnsortedList()) + bucketsUnchanged, bucketsToUpdate, bucketsToDelete, unFilledNodeSet := resolveExistingBuckets( + buckets, + desiredNodeSet, + ) + klog.V(4). + Infof("reconcileNodeBuckets for pool(%s), len(bucketsUnchanged)=%d, len(bucketsToUpdate)=%d, len(bucketsToDelete)=%d, unFilledNodeSet=%v", + pool.Name, len(bucketsUnchanged), len(bucketsToUpdate), len(bucketsToDelete), unFilledNodeSet.UnsortedList()) // If we still have unfilled nodes to add and buckets marked for update, // iterate through the buckets and fill them up with the unfilled nodes. @@ -234,8 +251,9 @@ func (r *ReconcileNodeBucket) reconcileNodeBuckets( } } } - klog.V(4).Infof("reconcileNodeBuckets for pool(%s) after filling bucketsToUpdate, len(bucketsUnchanged)=%d, len(bucketsToUpdate)=%d, len(bucketsToDelete)=%d, unFilledNodeSet=%v", - pool.Name, len(bucketsUnchanged), len(bucketsToUpdate), len(bucketsToDelete), unFilledNodeSet.UnsortedList()) + klog.V(4). + Infof("reconcileNodeBuckets for pool(%s) after filling bucketsToUpdate, len(bucketsUnchanged)=%d, len(bucketsToUpdate)=%d, len(bucketsToDelete)=%d, unFilledNodeSet=%v", + pool.Name, len(bucketsUnchanged), len(bucketsToUpdate), len(bucketsToDelete), unFilledNodeSet.UnsortedList()) // If there are still unfilled nodes left at this point, we try to fit the nodes in a single existing bucket. // If there are no buckets with that capacity, we create new buckets for the nodes. 
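Reviewer note: the nodebucket hunks above only reflow the logging and move NodePool to v1beta2; the partitioning algorithm is unchanged, but it is easy to lose in diff form. The reconciler first tops up buckets already marked for update, then tries the unchanged buckets, and only then creates fresh buckets for whatever nodes remain. A minimal standalone sketch of that final step, assuming a fixed bucket capacity (the constant, types, and function below are illustrative, not the controller's actual code):

package main

import (
	"fmt"
	"sort"
)

// maxNodesPerBucket is an assumed capacity for this sketch; the real
// controller sizes buckets according to its own configuration.
const maxNodesPerBucket = 3

// partitionNodes splits the desired node set into buckets of at most
// maxNodesPerBucket names, the same shape the controller's create path
// produces when no existing bucket can absorb the remaining nodes.
func partitionNodes(desired map[string]struct{}) [][]string {
	names := make([]string, 0, len(desired))
	for name := range desired {
		names = append(names, name)
	}
	sort.Strings(names) // keep the example deterministic

	var buckets [][]string
	for len(names) > 0 {
		n := maxNodesPerBucket
		if len(names) < n {
			n = len(names)
		}
		buckets = append(buckets, names[:n])
		names = names[n:]
	}
	return buckets
}

func main() {
	desired := map[string]struct{}{
		"node1": {}, "node2": {}, "node3": {}, "node4": {}, "node5": {},
	}
	for i, bucket := range partitionNodes(desired) {
		fmt.Printf("bucket %d: %v\n", i, bucket)
	}
}

For five desired nodes and a capacity of three, the sketch prints two buckets, which matches the shape the controller ends up with when none of the existing buckets can take the leftover nodes.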
@@ -263,14 +281,18 @@ func (r *ReconcileNodeBucket) reconcileNodeBuckets( bucketToFill.Nodes = append(bucketToFill.Nodes, appsv1alpha1.Node{Name: nodeName}) } } - klog.V(4).Infof("reconcileNodeBuckets for pool(%s) after filling bucketsUnchanged, len(bucketsUnchanged)=%d, len(bucketsToCreate)=%d len(bucketsToUpdate)=%v, len(bucketsToDelete)=%d, unFilledNodeSet=%v", - pool.Name, len(bucketsUnchanged), len(bucketsToCreate), len(bucketsToUpdate), len(bucketsToDelete), unFilledNodeSet.UnsortedList()) + klog.V(4). + Infof("reconcileNodeBuckets for pool(%s) after filling bucketsUnchanged, len(bucketsUnchanged)=%d, len(bucketsToCreate)=%d len(bucketsToUpdate)=%v, len(bucketsToDelete)=%d, unFilledNodeSet=%v", + pool.Name, len(bucketsUnchanged), len(bucketsToCreate), len(bucketsToUpdate), len(bucketsToDelete), unFilledNodeSet.UnsortedList()) return bucketsToCreate, bucketsToUpdate, bucketsToDelete, bucketsUnchanged } // resolveExistingBuckets iterates through existing node buckets to delete nodes no longer desired and update node buckets that have changed -func resolveExistingBuckets(buckets *appsv1alpha1.NodeBucketList, desiredNodeSet sets.Set[string]) ([]*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket, sets.Set[string]) { +func resolveExistingBuckets( + buckets *appsv1alpha1.NodeBucketList, + desiredNodeSet sets.Set[string], +) ([]*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket, []*appsv1alpha1.NodeBucket, sets.Set[string]) { bucketsUnchanged := []*appsv1alpha1.NodeBucket{} bucketsToUpdate := []*appsv1alpha1.NodeBucket{} bucketsToDelete := []*appsv1alpha1.NodeBucket{} @@ -321,8 +343,8 @@ func getBucketToFill(buckets []*appsv1alpha1.NodeBucket, numNodes, maxNodes int) return index, closestBucket } -func newNodeBucket(pool *appsv1beta1.NodePool) *appsv1alpha1.NodeBucket { - gvk := appsv1beta1.GroupVersion.WithKind("NodePool") +func newNodeBucket(pool *appsv1beta2.NodePool) *appsv1alpha1.NodeBucket { + gvk := appsv1beta2.GroupVersion.WithKind("NodePool") ownerRef := metav1.NewControllerRef(pool, gvk) bucket := &appsv1alpha1.NodeBucket{ ObjectMeta: metav1.ObjectMeta{ @@ -337,7 +359,11 @@ func newNodeBucket(pool *appsv1beta1.NodePool) *appsv1alpha1.NodeBucket { return bucket } -func finalize(ctx context.Context, c client.Client, bucketsToCreate, bucketsToUpdate, bucketsToDelete []*appsv1alpha1.NodeBucket) error { +func finalize( + ctx context.Context, + c client.Client, + bucketsToCreate, bucketsToUpdate, bucketsToDelete []*appsv1alpha1.NodeBucket, +) error { // If there are buckets to create and delete, change the creates to updates of the buckets that would otherwise be deleted. 
for i := 0; i < len(bucketsToDelete); { if len(bucketsToCreate) == 0 { diff --git a/pkg/yurtmanager/controller/nodebucket/node_bucket_controller_test.go b/pkg/yurtmanager/controller/nodebucket/node_bucket_controller_test.go index 95477f954d5..46fa5989703 100644 --- a/pkg/yurtmanager/controller/nodebucket/node_bucket_controller_test.go +++ b/pkg/yurtmanager/controller/nodebucket/node_bucket_controller_test.go @@ -32,7 +32,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis" appsalphav1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) @@ -71,7 +71,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -123,7 +123,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -193,7 +193,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -279,7 +279,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -349,7 +349,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -408,7 +408,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -491,7 +491,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -577,7 +577,7 @@ func TestReconcile(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, diff --git a/pkg/yurtmanager/controller/nodepool/nodepool_controller.go b/pkg/yurtmanager/controller/nodepool/nodepool_controller.go index ad2b071de08..bbe1835c3a0 100644 --- a/pkg/yurtmanager/controller/nodepool/nodepool_controller.go +++ b/pkg/yurtmanager/controller/nodepool/nodepool_controller.go @@ -34,13 +34,13 @@ import ( yurtClient "github.com/openyurtio/openyurt/cmd/yurt-manager/app/client" "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" poolconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodepool/config" ) var ( - controllerResource = appsv1beta1.SchemeGroupVersion.WithResource("nodepools") + controllerResource = appsv1beta2.SchemeGroupVersion.WithResource("nodepools") ) func Format(format string, args ...interface{}) string { @@ -81,7 +81,9 @@ func Add(ctx context.Context, c *config.CompletedConfig, mgr manager.Manager) er } // Watch for changes to NodePool - err = ctrl.Watch(source.Kind[client.Object](mgr.GetCache(), &appsv1beta1.NodePool{}, &handler.EnqueueRequestForObject{})) + err = ctrl.Watch( + source.Kind[client.Object](mgr.GetCache(), &appsv1beta2.NodePool{}, 
&handler.EnqueueRequestForObject{}), + ) if err != nil { return err } @@ -118,7 +120,7 @@ func (r *ReconcileNodePool) Reconcile(ctx context.Context, req reconcile.Request // @kadisi klog.Info(Format("Reconcile NodePool %s", req.Name)) - var nodePool appsv1beta1.NodePool + var nodePool appsv1beta2.NodePool // try to reconcile the NodePool object if err := r.Get(ctx, req.NamespacedName, &nodePool); err != nil { return ctrl.Result{}, client.IgnoreNotFound(err) diff --git a/pkg/yurtmanager/controller/nodepool/nodepool_controller_test.go b/pkg/yurtmanager/controller/nodepool/nodepool_controller_test.go index a489e39fd71..310245ef41f 100644 --- a/pkg/yurtmanager/controller/nodepool/nodepool_controller_test.go +++ b/pkg/yurtmanager/controller/nodepool/nodepool_controller_test.go @@ -31,7 +31,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/openyurtio/openyurt/pkg/apis" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" poolconfig "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/nodepool/config" ) @@ -100,34 +100,34 @@ func prepareNodes() []client.Object { func prepareNodePools() []client.Object { pools := []client.Object{ - &appsv1beta1.NodePool{ + &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "hangzhou", }, }, }, - &appsv1beta1.NodePool{ + &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "beijing", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "beijing", }, }, }, - &appsv1beta1.NodePool{ + &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "shanghai", }, @@ -146,27 +146,32 @@ func TestReconcile(t *testing.T) { } apis.AddToScheme(scheme) - c := fakeclient.NewClientBuilder().WithScheme(scheme).WithObjects(pools...).WithStatusSubresource(pools...).WithObjects(nodes...).Build() + c := fakeclient.NewClientBuilder(). + WithScheme(scheme). + WithObjects(pools...). + WithStatusSubresource(pools...). + WithObjects(nodes...). 
+ Build() testcases := map[string]struct { EnableSyncNodePoolConfigurations bool pool string - wantedPool *appsv1beta1.NodePool + wantedPool *appsv1beta2.NodePool wantedNodes []corev1.Node err error }{ "reconcile hangzhou pool": { pool: "hangzhou", - wantedPool: &appsv1beta1.NodePool{ + wantedPool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "hangzhou", }, }, - Status: appsv1beta1.NodePoolStatus{ + Status: appsv1beta2.NodePoolStatus{ ReadyNodeNum: 1, UnreadyNodeNum: 1, Nodes: []string{"node1", "node2"}, @@ -176,17 +181,17 @@ func TestReconcile(t *testing.T) { "reconcile beijing pool": { EnableSyncNodePoolConfigurations: true, pool: "beijing", - wantedPool: &appsv1beta1.NodePool{ + wantedPool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "beijing", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "beijing", }, }, - Status: appsv1beta1.NodePoolStatus{ + Status: appsv1beta2.NodePoolStatus{ ReadyNodeNum: 1, UnreadyNodeNum: 1, Nodes: []string{"node3", "node4"}, @@ -229,17 +234,17 @@ func TestReconcile(t *testing.T) { }, "reconcile shanghai pool without nodes": { pool: "shanghai", - wantedPool: &appsv1beta1.NodePool{ + wantedPool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "shanghai", }, }, - Status: appsv1beta1.NodePoolStatus{ + Status: appsv1beta2.NodePoolStatus{ ReadyNodeNum: 0, UnreadyNodeNum: 0, }, @@ -263,7 +268,7 @@ func TestReconcile(t *testing.T) { return } - var wantedPool appsv1beta1.NodePool + var wantedPool appsv1beta2.NodePool if err := r.Get(ctx, req.NamespacedName, &wantedPool); err != nil { t.Errorf("Reconcile() error = %v", err) return diff --git a/pkg/yurtmanager/controller/nodepool/util.go b/pkg/yurtmanager/controller/nodepool/util.go index c502723d530..b57c467734e 100644 --- a/pkg/yurtmanager/controller/nodepool/util.go +++ b/pkg/yurtmanager/controller/nodepool/util.go @@ -24,13 +24,13 @@ import ( corev1 "k8s.io/api/core/v1" "github.com/openyurtio/openyurt/pkg/apis/apps" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" nodeutil "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util/node" ) // conciliatePoolRelatedAttrs will update the node's attributes that related to // the nodepool -func conciliateNode(node *corev1.Node, nodePool *appsv1beta1.NodePool) (bool, error) { +func conciliateNode(node *corev1.Node, nodePool *appsv1beta2.NodePool) (bool, error) { // update node attr newNpra := &NodePoolRelatedAttributes{ Labels: nodePool.Spec.Labels, @@ -109,7 +109,7 @@ func conciliateNodePoolStatus( readyNode, notReadyNode int32, nodes []string, - nodePool *appsv1beta1.NodePool) (needUpdate bool) { + nodePool *appsv1beta2.NodePool) (needUpdate bool) { if readyNode != nodePool.Status.ReadyNodeNum { nodePool.Status.ReadyNodeNum = readyNode @@ -208,7 +208,8 @@ func areNodePoolRelatedAttributesEqual(a, b *NodePoolRelatedAttributes) bool { } isLabelsEqual := (len(a.Labels) == 0 && len(b.Labels) == 0) || reflect.DeepEqual(a.Labels, b.Labels) - isAnnotationsEqual := (len(a.Annotations) == 0 && 
len(b.Annotations) == 0) || reflect.DeepEqual(a.Annotations, b.Annotations) + isAnnotationsEqual := (len(a.Annotations) == 0 && len(b.Annotations) == 0) || + reflect.DeepEqual(a.Annotations, b.Annotations) isTaintsEqual := (len(a.Taints) == 0 && len(b.Taints) == 0) || reflect.DeepEqual(a.Taints, b.Taints) return isLabelsEqual && isAnnotationsEqual && isTaintsEqual diff --git a/pkg/yurtmanager/controller/nodepool/util_test.go b/pkg/yurtmanager/controller/nodepool/util_test.go index 8afb6065506..69ab96db27a 100644 --- a/pkg/yurtmanager/controller/nodepool/util_test.go +++ b/pkg/yurtmanager/controller/nodepool/util_test.go @@ -25,14 +25,14 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/openyurtio/openyurt/pkg/apis/apps" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" ) func TestConcilateNode(t *testing.T) { testcases := map[string]struct { initNpra *NodePoolRelatedAttributes mockNode corev1.Node - pool appsv1beta1.NodePool + pool appsv1beta2.NodePool wantedNodeExcludeAttribute corev1.Node updated bool }{ @@ -56,8 +56,8 @@ func TestConcilateNode(t *testing.T) { }, }, }, - pool: appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + pool: appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Labels: map[string]string{ "poollabel1": "value1", "poollabel2": "value2", @@ -148,8 +148,8 @@ func TestConcilateNode(t *testing.T) { }, }, }, - pool: appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + pool: appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Labels: map[string]string{ "poollabel1": "value1", "poollabel2": "value2", @@ -243,8 +243,8 @@ func TestConcilateNode(t *testing.T) { }, }, }, - pool: appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + pool: appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Labels: map[string]string{ "label2": "value2", }, @@ -312,8 +312,8 @@ func TestConcilateNode(t *testing.T) { }, }, }, - pool: appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + pool: appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Labels: map[string]string{ "label2": "value2", "poollabel2": "value2", @@ -372,8 +372,8 @@ func TestConcilateNode(t *testing.T) { }, }, }, - pool: appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + pool: appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Labels: map[string]string{}, Annotations: map[string]string{}, Taints: []corev1.Taint{}, @@ -557,15 +557,15 @@ func TestConciliateNodePoolStatus(t *testing.T) { readyNodes int32 notReadyNodes int32 nodes []string - pool *appsv1beta1.NodePool + pool *appsv1beta2.NodePool needUpdated bool }{ "status is needed to update": { readyNodes: 5, notReadyNodes: 2, nodes: []string{"foo", "bar", "cat", "zxxde"}, - pool: &appsv1beta1.NodePool{ - Status: appsv1beta1.NodePoolStatus{ + pool: &appsv1beta2.NodePool{ + Status: appsv1beta2.NodePoolStatus{ ReadyNodeNum: 2, UnreadyNodeNum: 3, Nodes: []string{"foo", "bar", "cat", "zxxde", "lucky"}, @@ -577,8 +577,8 @@ func TestConciliateNodePoolStatus(t *testing.T) { readyNodes: 2, notReadyNodes: 2, nodes: []string{"foo", "bar", "cat", "zxxde"}, - pool: &appsv1beta1.NodePool{ - Status: appsv1beta1.NodePoolStatus{ + pool: &appsv1beta2.NodePool{ + Status: appsv1beta2.NodePoolStatus{ ReadyNodeNum: 2, UnreadyNodeNum: 2, Nodes: []string{"foo", "bar", "cat", "zxxde"}, @@ -590,11 +590,11 @@ func TestConciliateNodePoolStatus(t *testing.T) { readyNodes: 0, notReadyNodes: 0, nodes: []string{}, - pool: &appsv1beta1.NodePool{ + pool: 
&appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", }, - Status: appsv1beta1.NodePoolStatus{ + Status: appsv1beta2.NodePoolStatus{ ReadyNodeNum: 0, UnreadyNodeNum: 0, Nodes: []string{}, @@ -606,7 +606,7 @@ func TestConciliateNodePoolStatus(t *testing.T) { readyNodes: 0, notReadyNodes: 0, nodes: []string{}, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "foo", }, @@ -667,7 +667,13 @@ func TestContainTaint(t *testing.T) { t.Run(k, func(t *testing.T) { gotIndex, gotBool := containTaint(tc.inputTaint, mockTaints) if gotIndex != tc.resultIndex || gotBool != tc.isContained { - t.Errorf("Expected index %v and bool %v, got index %v and bool %v", tc.resultIndex, tc.isContained, gotIndex, gotBool) + t.Errorf( + "Expected index %v and bool %v, got index %v and bool %v", + tc.resultIndex, + tc.isContained, + gotIndex, + gotBool, + ) } }) } diff --git a/pkg/yurtmanager/controller/yurtappset/workloadmanager/deployment_manager_test.go b/pkg/yurtmanager/controller/yurtappset/workloadmanager/deployment_manager_test.go index 1946999087e..8f202d09878 100644 --- a/pkg/yurtmanager/controller/yurtappset/workloadmanager/deployment_manager_test.go +++ b/pkg/yurtmanager/controller/yurtappset/workloadmanager/deployment_manager_test.go @@ -26,6 +26,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" ) var testYAS = &v1beta1.YurtAppSet{ @@ -90,11 +91,11 @@ var testYAS = &v1beta1.YurtAppSet{ }, } -var testNp = &v1beta1.NodePool{ +var testNp = &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-nodepool", }, - Spec: v1beta1.NodePoolSpec{ + Spec: v1beta2.NodePoolSpec{ HostNetwork: false, }, } diff --git a/pkg/yurtmanager/controller/yurtappset/workloadmanager/statefulset_manager_test.go b/pkg/yurtmanager/controller/yurtappset/workloadmanager/statefulset_manager_test.go index 2a8d69db0b9..af8d5d85b73 100644 --- a/pkg/yurtmanager/controller/yurtappset/workloadmanager/statefulset_manager_test.go +++ b/pkg/yurtmanager/controller/yurtappset/workloadmanager/statefulset_manager_test.go @@ -26,6 +26,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" ) var stsYAS = &v1beta1.YurtAppSet{ @@ -88,11 +89,11 @@ var stsYAS = &v1beta1.YurtAppSet{ }, } -var stsNp = &v1beta1.NodePool{ +var stsNp = &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-nodepool", }, - Spec: v1beta1.NodePoolSpec{ + Spec: v1beta2.NodePoolSpec{ HostNetwork: false, }, } diff --git a/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks.go b/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks.go index 3011f4ca7dc..4e7f03e3e04 100644 --- a/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks.go +++ b/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks.go @@ -29,19 +29,25 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" ) -func GetNodePoolTweaksFromYurtAppSet(cli client.Client, nodepoolName string, yas *v1beta1.YurtAppSet) (tweaksList []*v1beta1.Tweaks, err error) { +func GetNodePoolTweaksFromYurtAppSet( + cli client.Client, + nodepoolName string, + yas *v1beta1.YurtAppSet, +) (tweaksList []*v1beta1.Tweaks, err error) { tweaksList = []*v1beta1.Tweaks{} - np := v1beta1.NodePool{} + np := 
v1beta2.NodePool{} if err = cli.Get(context.TODO(), client.ObjectKey{Name: nodepoolName}, &np); err != nil { return } for _, yasTweak := range yas.Spec.Workload.WorkloadTweaks { if isNodePoolRelated(&np, yasTweak.Pools, yasTweak.NodePoolSelector) { - klog.V(4).Infof("nodepool %s is related to yurtappset %s/%s, add tweaks", nodepoolName, yas.Namespace, yas.Name) + klog.V(4). + Infof("nodepool %s is related to yurtappset %s/%s, add tweaks", nodepoolName, yas.Namespace, yas.Name) tweaksCopy := yasTweak.Tweaks tweaksList = append(tweaksList, &tweaksCopy) } @@ -73,20 +79,23 @@ func ApplyTweaksToStatefulSet(statefulset *v1.StatefulSet, tweaks []*v1beta1.Twe func applyBasicTweaksToDeployment(deployment *v1.Deployment, basicTweaks []*v1beta1.Tweaks) { for _, item := range basicTweaks { if item.Replicas != nil { - klog.V(4).Infof("Apply BasicTweaks successfully: overwrite replicas to %d in deployment %s/%s", *item.Replicas, deployment.Name, deployment.Namespace) + klog.V(4). + Infof("Apply BasicTweaks successfully: overwrite replicas to %d in deployment %s/%s", *item.Replicas, deployment.Name, deployment.Namespace) deployment.Spec.Replicas = item.Replicas } for _, item := range item.ContainerImages { for i := range deployment.Spec.Template.Spec.Containers { if deployment.Spec.Template.Spec.Containers[i].Name == item.Name { - klog.V(5).Infof("Apply BasicTweaks successfully: overwrite container %s 's image to %s in deployment %s/%s", item.Name, item.TargetImage, deployment.Name, deployment.Namespace) + klog.V(5). + Infof("Apply BasicTweaks successfully: overwrite container %s 's image to %s in deployment %s/%s", item.Name, item.TargetImage, deployment.Name, deployment.Namespace) deployment.Spec.Template.Spec.Containers[i].Image = item.TargetImage } } for i := range deployment.Spec.Template.Spec.InitContainers { if deployment.Spec.Template.Spec.InitContainers[i].Name == item.Name { - klog.V(5).Infof("Apply BasicTweaks successfully: overwrite init container %s 's image to %s in deployment %s/%s", item.Name, item.TargetImage, deployment.Name, deployment.Namespace) + klog.V(5). + Infof("Apply BasicTweaks successfully: overwrite init container %s 's image to %s in deployment %s/%s", item.Name, item.TargetImage, deployment.Name, deployment.Namespace) deployment.Spec.Template.Spec.InitContainers[i].Image = item.TargetImage } } @@ -98,19 +107,22 @@ func applyBasicTweaksToDeployment(deployment *v1.Deployment, basicTweaks []*v1be func applyBasicTweaksToStatefulSet(statefulset *v1.StatefulSet, basicTweaks []*v1beta1.Tweaks) { for _, item := range basicTweaks { if item.Replicas != nil { - klog.V(4).Infof("Apply BasicTweaks successfully: overwrite replicas to %d in statefulset %s/%s", *item.Replicas, statefulset.Name, statefulset.Namespace) + klog.V(4). + Infof("Apply BasicTweaks successfully: overwrite replicas to %d in statefulset %s/%s", *item.Replicas, statefulset.Name, statefulset.Namespace) statefulset.Spec.Replicas = item.Replicas } for _, item := range item.ContainerImages { for i := range statefulset.Spec.Template.Spec.Containers { if statefulset.Spec.Template.Spec.Containers[i].Name == item.Name { - klog.V(5).Infof("Apply BasicTweaks successfully: overwrite container %s 's image to %s in statefulset %s/%s", item.Name, item.TargetImage, statefulset.Name, statefulset.Namespace) + klog.V(5). 
+ Infof("Apply BasicTweaks successfully: overwrite container %s 's image to %s in statefulset %s/%s", item.Name, item.TargetImage, statefulset.Name, statefulset.Namespace) statefulset.Spec.Template.Spec.Containers[i].Image = item.TargetImage } } for i := range statefulset.Spec.Template.Spec.InitContainers { if statefulset.Spec.Template.Spec.InitContainers[i].Name == item.Name { - klog.V(5).Infof("Apply BasicTweaks successfully: overwrite init container %s 's image to %s in statefulset %s/%s", item.Name, item.TargetImage, statefulset.Name, statefulset.Namespace) + klog.V(5). + Infof("Apply BasicTweaks successfully: overwrite init container %s 's image to %s in statefulset %s/%s", item.Name, item.TargetImage, statefulset.Name, statefulset.Namespace) statefulset.Spec.Template.Spec.InitContainers[i].Image = item.TargetImage } } @@ -196,7 +208,9 @@ func preparePatchOperations(tweaks []*v1beta1.Tweaks, poolName string) []patchOp for _, tweak := range tweaks { for _, patch := range tweak.Patches { if strings.Contains(string(patch.Value.Raw), "{{nodepool-name}}") { - patch.Value = apiextensionsv1.JSON{Raw: []byte(strings.ReplaceAll(string(patch.Value.Raw), "{{nodepool-name}}", poolName))} + patch.Value = apiextensionsv1.JSON{ + Raw: []byte(strings.ReplaceAll(string(patch.Value.Raw), "{{nodepool-name}}", poolName)), + } } patchOperations = append(patchOperations, patchOperation{ Op: string(patch.Operation), diff --git a/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks_test.go b/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks_test.go index 230983e8d8c..df62237a2c8 100644 --- a/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks_test.go +++ b/pkg/yurtmanager/controller/yurtappset/workloadmanager/tweaks_test.go @@ -32,6 +32,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis" "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" ) var ( @@ -154,7 +155,7 @@ func TestGetNodePoolTweaksFromYurtAppSet(t *testing.T) { name: "nodepool matches yurtappset", args: args{ cli: fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects( - &v1beta1.NodePool{ObjectMeta: metav1.ObjectMeta{ + &v1beta2.NodePool{ObjectMeta: metav1.ObjectMeta{ Name: "test-nodepool", }}, &v1beta1.YurtAppSet{ @@ -197,7 +198,7 @@ func TestGetNodePoolTweaksFromYurtAppSet(t *testing.T) { name: "no nodepool selector or pools specified", args: args{ cli: fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects( - &v1beta1.NodePool{ObjectMeta: metav1.ObjectMeta{ + &v1beta2.NodePool{ObjectMeta: metav1.ObjectMeta{ Name: "test-nodepool", }}, &v1beta1.YurtAppSet{ @@ -217,7 +218,7 @@ func TestGetNodePoolTweaksFromYurtAppSet(t *testing.T) { name: "nodepool selector match", args: args{ cli: fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects( - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-nodepool", Labels: map[string]string{ diff --git a/pkg/yurtmanager/controller/yurtappset/workloadmanager/util.go b/pkg/yurtmanager/controller/yurtappset/workloadmanager/util.go index 1a8c52d5b17..7ee6f2e5fb7 100644 --- a/pkg/yurtmanager/controller/yurtappset/workloadmanager/util.go +++ b/pkg/yurtmanager/controller/yurtappset/workloadmanager/util.go @@ -28,6 +28,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) 
@@ -69,11 +70,15 @@ func GetNodePoolsFromYurtAppSet(cli client.Client, yas *v1beta1.YurtAppSet) (npN // Get NodePools selected by pools and npSelector // If specified pool does not exist, it will skip -func getSelectedNodepools(cli client.Client, pools []string, npSelector *metav1.LabelSelector) (selectedNps sets.Set[string], err error) { +func getSelectedNodepools( + cli client.Client, + pools []string, + npSelector *metav1.LabelSelector, +) (selectedNps sets.Set[string], err error) { selectedNps = sets.New[string]() // get all nodepools - allNps := v1beta1.NodePoolList{} + allNps := v1beta2.NodePoolList{} err = cli.List(context.TODO(), &allNps) if err != nil { return nil, err diff --git a/pkg/yurtmanager/controller/yurtappset/workloadmanager/util_test.go b/pkg/yurtmanager/controller/yurtappset/workloadmanager/util_test.go index be095926be1..f1c4b5ebac4 100644 --- a/pkg/yurtmanager/controller/yurtappset/workloadmanager/util_test.go +++ b/pkg/yurtmanager/controller/yurtappset/workloadmanager/util_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -28,6 +29,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) @@ -150,7 +152,8 @@ func TestGetNodePoolsFromYurtAppSet(t *testing.T) { } scheme := runtime.NewScheme() - assert.Nil(t, v1beta1.AddToScheme(scheme)) + require.NoError(t, v1beta1.AddToScheme(scheme)) + require.NoError(t, v1beta2.AddToScheme(scheme)) tests := []struct { name string @@ -189,7 +192,7 @@ func TestGetNodePoolsFromYurtAppSet(t *testing.T) { // TestIsNodePoolRelated tests the isNodePoolRelated function func TestIsNodePoolRelated(t *testing.T) { // Test case 1: pools is empty, npSelector is not empty, and the match succeeds - nodePool := &v1beta1.NodePool{ + nodePool := &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "nodepool1", Labels: map[string]string{"label": "value"}, diff --git a/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller.go b/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller.go index 0ee59f57370..0f6dafd23ff 100644 --- a/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller.go +++ b/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller.go @@ -52,6 +52,7 @@ import ( "github.com/openyurtio/openyurt/cmd/yurt-manager/app/config" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" unitv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + unitv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/util" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/yurtappset/workloadmanager" ) @@ -106,7 +107,14 @@ func newReconciler(c *config.CompletedConfig, mgr manager.Manager) reconcile.Rec // add adds a new Controller to mgr with r as the reconcile.Reconciler func add(mgr manager.Manager, cfg *config.CompletedConfig, r reconcile.Reconciler) error { // Create a new controller - c, err := controller.New(names.YurtAppSetController, mgr, controller.Options{Reconciler: r, MaxConcurrentReconciles: int(cfg.ComponentConfig.YurtAppSetController.ConcurrentYurtAppSetWorkers)}) + c, err := controller.New( + names.YurtAppSetController, + mgr, + controller.Options{ + Reconciler: r, + MaxConcurrentReconciles:
int(cfg.ComponentConfig.YurtAppSetController.ConcurrentYurtAppSetWorkers), + }, + ) if err != nil { return err } @@ -119,11 +127,11 @@ func add(mgr manager.Manager, cfg *config.CompletedConfig, r reconcile.Reconcile return true }, UpdateFunc: func(evt event.UpdateEvent) bool { - oldNodePool, ok := evt.ObjectOld.(*unitv1beta1.NodePool) + oldNodePool, ok := evt.ObjectOld.(*unitv1beta2.NodePool) if !ok { return false } - newNodePool, ok := evt.ObjectNew.(*unitv1beta1.NodePool) + newNodePool, ok := evt.ObjectNew.(*unitv1beta2.NodePool) if !ok { return false } @@ -155,18 +163,35 @@ func add(mgr manager.Manager, cfg *config.CompletedConfig, r reconcile.Reconcile return } - err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &unitv1beta1.NodePool{}, handler.EnqueueRequestsFromMapFunc(nodePoolToYurtAppSet), nodePoolPredicate)) + err = c.Watch( + source.Kind[client.Object]( + mgr.GetCache(), + &unitv1beta2.NodePool{}, + handler.EnqueueRequestsFromMapFunc(nodePoolToYurtAppSet), + nodePoolPredicate, + ), + ) if err != nil { return err } - err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &unitv1beta1.YurtAppSet{}, &handler.EnqueueRequestForObject{})) + err = c.Watch( + source.Kind[client.Object](mgr.GetCache(), &unitv1beta1.YurtAppSet{}, &handler.EnqueueRequestForObject{}), + ) if err != nil { return err } - err = c.Watch(source.Kind[client.Object](mgr.GetCache(), &appsv1.Deployment{}, - handler.EnqueueRequestForOwner(mgr.GetScheme(), mgr.GetRESTMapper(), &unitv1beta1.YurtAppSet{}, handler.OnlyControllerOwner()))) + err = c.Watch(source.Kind[client.Object]( + mgr.GetCache(), + &appsv1.Deployment{}, + handler.EnqueueRequestForOwner( + mgr.GetScheme(), + mgr.GetRESTMapper(), + &unitv1beta1.YurtAppSet{}, + handler.OnlyControllerOwner(), + ), + )) if err != nil { return err } @@ -196,7 +221,10 @@ type ReconcileYurtAppSet struct { // Reconcile reads that state of the cluster for a YurtAppSet object and makes changes based on the state read // and what is in the YurtAppSet.Spec -func (r *ReconcileYurtAppSet) Reconcile(_ context.Context, request reconcile.Request) (res reconcile.Result, err error) { +func (r *ReconcileYurtAppSet) Reconcile( + _ context.Context, + request reconcile.Request, +) (res reconcile.Result, err error) { klog.V(2).Infof("Reconcile YurtAppSet %s/%s Start.", request.Namespace, request.Name) res = reconcile.Result{} @@ -220,7 +248,12 @@ func (r *ReconcileYurtAppSet) Reconcile(_ context.Context, request reconcile.Req yasStatus.CollisionCount = &collisionCount if err != nil { klog.Errorf("could not construct controller revision of YurtAppSet %s/%s: %s", yas.Namespace, yas.Name, err) - r.recorder.Event(yas.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed%s", eventTypeRevisionProvision), err.Error()) + r.recorder.Event( + yas.DeepCopy(), + corev1.EventTypeWarning, + fmt.Sprintf("Failed%s", eventTypeRevisionProvision), + err.Error(), + ) return } @@ -244,15 +277,31 @@ func (r *ReconcileYurtAppSet) Reconcile(_ context.Context, request reconcile.Req return } -func (r *ReconcileYurtAppSet) getNodePoolsFromYurtAppSet(yas *unitv1beta1.YurtAppSet, newStatus *unitv1beta1.YurtAppSetStatus) (npNames sets.Set[string], err error) { +func (r *ReconcileYurtAppSet) getNodePoolsFromYurtAppSet( + yas *unitv1beta1.YurtAppSet, + newStatus *unitv1beta1.YurtAppSetStatus, +) (npNames sets.Set[string], err error) { expectedNps, err := workloadmanager.GetNodePoolsFromYurtAppSet(r.Client, yas) if err != nil { return nil, err } if expectedNps.Len() == 0 { klog.V(4).Infof("No NodePools found for 
YurtAppSet %s/%s", yas.Namespace, yas.Name) - r.recorder.Event(yas.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("No%s", eventTypeFindPools), fmt.Sprintf("There are no matched nodepools for YurtAppSet %s/%s", yas.Namespace, yas.Name)) - SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetPoolFound, corev1.ConditionFalse, fmt.Sprintf("No%s", eventTypeFindPools), "There are no matched nodepools for YurtAppSet")) + r.recorder.Event( + yas.DeepCopy(), + corev1.EventTypeWarning, + fmt.Sprintf("No%s", eventTypeFindPools), + fmt.Sprintf("There are no matched nodepools for YurtAppSet %s/%s", yas.Namespace, yas.Name), + ) + SetYurtAppSetCondition( + newStatus, + NewYurtAppSetCondition( + unitv1beta1.AppSetPoolFound, + corev1.ConditionFalse, + fmt.Sprintf("No%s", eventTypeFindPools), + "There are no matched nodepools for YurtAppSet", + ), + ) } else { klog.V(4).Infof("NodePools matched for YurtAppSet %s/%s: %v", yas.Namespace, yas.Name, expectedNps.UnsortedList()) SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetPoolFound, corev1.ConditionTrue, eventTypeFindPools, fmt.Sprintf("There are %d matched nodepools: %v", expectedNps.Len(), expectedNps.UnsortedList()))) @@ -260,7 +309,9 @@ func (r *ReconcileYurtAppSet) getNodePoolsFromYurtAppSet(yas *unitv1beta1.YurtAp return expectedNps, nil } -func (r *ReconcileYurtAppSet) getWorkloadManagerFromYurtAppSet(yas *unitv1beta1.YurtAppSet) (workloadmanager.WorkloadManager, error) { +func (r *ReconcileYurtAppSet) getWorkloadManagerFromYurtAppSet( + yas *unitv1beta1.YurtAppSet, +) (workloadmanager.WorkloadManager, error) { switch { case yas.Spec.Workload.WorkloadTemplate.StatefulSetTemplate != nil: return r.workloadManagers[workloadmanager.StatefulSetTemplateType], nil @@ -273,8 +324,12 @@ func (r *ReconcileYurtAppSet) getWorkloadManagerFromYurtAppSet(yas *unitv1beta1. 
} } -func classifyWorkloads(yas *unitv1beta1.YurtAppSet, currentWorkloads []metav1.Object, - expectedNodePools sets.Set[string], expectedRevision string) (needDeleted, needUpdate []metav1.Object, needCreate []string) { +func classifyWorkloads( + yas *unitv1beta1.YurtAppSet, + currentWorkloads []metav1.Object, + expectedNodePools sets.Set[string], + expectedRevision string, +) (needDeleted, needUpdate []metav1.Object, needCreate []string) { // classify workloads by nodepool name nodePoolsToWorkloads := make(map[string]metav1.Object) @@ -326,7 +381,11 @@ func classifyWorkloads(yas *unitv1beta1.YurtAppSet, currentWorkloads []metav1.Ob } // Conciliate workloads as yas spec expect -func (r *ReconcileYurtAppSet) conciliateWorkloads(yas *unitv1beta1.YurtAppSet, expectedRevision *appsv1.ControllerRevision, newStatus *unitv1beta1.YurtAppSetStatus) (expectedNps sets.Set[string], curWorkloads []metav1.Object, err error) { +func (r *ReconcileYurtAppSet) conciliateWorkloads( + yas *unitv1beta1.YurtAppSet, + expectedRevision *appsv1.ControllerRevision, + newStatus *unitv1beta1.YurtAppSetStatus, +) (expectedNps sets.Set[string], curWorkloads []metav1.Object, err error) { // Get yas selected NodePools // this may infect yas poolfound condition @@ -353,30 +412,54 @@ func (r *ReconcileYurtAppSet) conciliateWorkloads(yas *unitv1beta1.YurtAppSet, e templateType := workloadManager.GetTemplateType() // Classify workloads into del/create/update 3 categories - needDelWorkloads, needUpdateWorkloads, needCreateNodePools := classifyWorkloads(yas, curWorkloads, expectedNps, expectedRevision.GetName()) + needDelWorkloads, needUpdateWorkloads, needCreateNodePools := classifyWorkloads( + yas, + curWorkloads, + expectedNps, + expectedRevision.GetName(), + ) // Manipulate resources // 1. 
create workloads if len(needCreateNodePools) > 0 { - createdNum, createdErr := util.SlowStartBatch(len(needCreateNodePools), slowStartInitialBatchSize, func(idx int) error { - nodepoolName := needCreateNodePools[idx] - err := workloadManager.Create(yas, nodepoolName, expectedRevision.GetName()) - if err != nil { - klog.Errorf("YurtAppSet[%s/%s] templatetype %s create workload by nodepool %s error: %s", - yas.GetNamespace(), yas.GetName(), templateType, nodepoolName, err.Error()) - if !errors.IsTimeout(err) { - return fmt.Errorf("YurtAppSet[%s/%s] templatetype %s create workload by nodepool %s error: %s", + createdNum, createdErr := util.SlowStartBatch( + len(needCreateNodePools), + slowStartInitialBatchSize, + func(idx int) error { + nodepoolName := needCreateNodePools[idx] + err := workloadManager.Create(yas, nodepoolName, expectedRevision.GetName()) + if err != nil { + klog.Errorf("YurtAppSet[%s/%s] templatetype %s create workload by nodepool %s error: %s", yas.GetNamespace(), yas.GetName(), templateType, nodepoolName, err.Error()) + if !errors.IsTimeout(err) { + return fmt.Errorf("YurtAppSet[%s/%s] templatetype %s create workload by nodepool %s error: %s", + yas.GetNamespace(), yas.GetName(), templateType, nodepoolName, err.Error()) + } } - } - klog.Infof("YurtAppSet[%s/%s] create workload [%s/%s] success", - yas.GetNamespace(), yas.GetName(), templateType, nodepoolName) - return nil - }) + klog.Infof("YurtAppSet[%s/%s] create workload [%s/%s] success", + yas.GetNamespace(), yas.GetName(), templateType, nodepoolName) + return nil + }, + ) if createdErr == nil { - r.recorder.Eventf(yas.DeepCopy(), corev1.EventTypeNormal, fmt.Sprintf("Successful %s", eventTypeWorkloadsCreated), "Create %d %s", createdNum, templateType) - SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppDispatchced, corev1.ConditionTrue, "", "All expected workloads are created successfully")) + r.recorder.Eventf( + yas.DeepCopy(), + corev1.EventTypeNormal, + fmt.Sprintf("Successful %s", eventTypeWorkloadsCreated), + "Create %d %s", + createdNum, + templateType, + ) + SetYurtAppSetCondition( + newStatus, + NewYurtAppSetCondition( + unitv1beta1.AppSetAppDispatchced, + corev1.ConditionTrue, + "", + "All expected workloads are created successfully", + ), + ) } else { errs = append(errs, createdErr) SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppDispatchced, corev1.ConditionFalse, "CreateWorkloadError", createdErr.Error())) @@ -389,21 +472,56 @@ func (r *ReconcileYurtAppSet) conciliateWorkloads(yas *unitv1beta1.YurtAppSet, e workloadTobeDeleted := needDelWorkloads[idx] err := workloadManager.Delete(yas, workloadTobeDeleted) if err != nil { - klog.Errorf("YurtAppSet[%s/%s] delete %s[%s/%s] error: %s", - yas.GetNamespace(), yas.GetName(), templateType, workloadTobeDeleted.GetNamespace(), workloadTobeDeleted.GetName(), err.Error()) + klog.Errorf( + "YurtAppSet[%s/%s] delete %s[%s/%s] error: %s", + yas.GetNamespace(), + yas.GetName(), + templateType, + workloadTobeDeleted.GetNamespace(), + workloadTobeDeleted.GetName(), + err.Error(), + ) if !errors.IsTimeout(err) { - return fmt.Errorf("YurtAppSet[%s/%s] delete %s[%s/%s] error: %s", - yas.GetNamespace(), yas.GetName(), templateType, workloadTobeDeleted.GetNamespace(), workloadTobeDeleted.GetName(), err.Error()) + return fmt.Errorf( + "YurtAppSet[%s/%s] delete %s[%s/%s] error: %s", + yas.GetNamespace(), + yas.GetName(), + templateType, + workloadTobeDeleted.GetNamespace(), + workloadTobeDeleted.GetName(), + err.Error(), + ) 
} } - klog.Infof("YurtAppSet[%s/%s] templatetype delete %s[%s/%s] success", - yas.GetNamespace(), yas.GetName(), templateType, workloadTobeDeleted.GetNamespace(), workloadTobeDeleted.GetName()) + klog.Infof( + "YurtAppSet[%s/%s] templatetype delete %s[%s/%s] success", + yas.GetNamespace(), + yas.GetName(), + templateType, + workloadTobeDeleted.GetNamespace(), + workloadTobeDeleted.GetName(), + ) return nil }) if delErr == nil { - r.recorder.Eventf(yas.DeepCopy(), corev1.EventTypeNormal, fmt.Sprintf("Successful %s", eventTypeWorkloadsDeleted), "Delete %d %s", delNum, templateType) - SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppDeleted, corev1.ConditionTrue, "", "Unexpected workloads are deleted successfully")) + r.recorder.Eventf( + yas.DeepCopy(), + corev1.EventTypeNormal, + fmt.Sprintf("Successful %s", eventTypeWorkloadsDeleted), + "Delete %d %s", + delNum, + templateType, + ) + SetYurtAppSetCondition( + newStatus, + NewYurtAppSetCondition( + unitv1beta1.AppSetAppDeleted, + corev1.ConditionTrue, + "", + "Unexpected workloads are deleted successfully", + ), + ) } else { errs = append(errs, delErr) SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppDeleted, corev1.ConditionFalse, "DeleteWorkloadError", delErr.Error())) @@ -412,23 +530,68 @@ func (r *ReconcileYurtAppSet) conciliateWorkloads(yas *unitv1beta1.YurtAppSet, e // 3. update workloads if len(needUpdateWorkloads) > 0 { - updatedNum, updateErr := util.SlowStartBatch(len(needUpdateWorkloads), slowStartInitialBatchSize, func(index int) error { - workloadTobeUpdated := needUpdateWorkloads[index] - err := workloadManager.Update(yas, workloadTobeUpdated, workloadmanager.GetWorkloadRefNodePool(workloadTobeUpdated), expectedRevision.GetName()) - if err != nil { - r.recorder.Event(yas.DeepCopy(), corev1.EventTypeWarning, fmt.Sprintf("Failed %s", eventTypeWorkloadsUpdated), - fmt.Sprintf("Error updating %s %s when updating: %s", templateType, workloadTobeUpdated.GetName(), err)) - klog.Errorf("YurtAppSet[%s/%s] update workload[%s/%s/%s] error %v", yas.GetNamespace(), yas.GetName(), - templateType, workloadTobeUpdated.GetNamespace(), workloadTobeUpdated.GetName(), err) - } - klog.Infof("YurtAppSet[%s/%s] templatetype %s update workload by nodepool %s success", - yas.GetNamespace(), yas.GetName(), templateType, workloadmanager.GetWorkloadRefNodePool(workloadTobeUpdated)) - return err - }) + updatedNum, updateErr := util.SlowStartBatch( + len(needUpdateWorkloads), + slowStartInitialBatchSize, + func(index int) error { + workloadTobeUpdated := needUpdateWorkloads[index] + err := workloadManager.Update( + yas, + workloadTobeUpdated, + workloadmanager.GetWorkloadRefNodePool(workloadTobeUpdated), + expectedRevision.GetName(), + ) + if err != nil { + r.recorder.Event( + yas.DeepCopy(), + corev1.EventTypeWarning, + fmt.Sprintf("Failed %s", eventTypeWorkloadsUpdated), + fmt.Sprintf( + "Error updating %s %s when updating: %s", + templateType, + workloadTobeUpdated.GetName(), + err, + ), + ) + klog.Errorf( + "YurtAppSet[%s/%s] update workload[%s/%s/%s] error %v", + yas.GetNamespace(), + yas.GetName(), + templateType, + workloadTobeUpdated.GetNamespace(), + workloadTobeUpdated.GetName(), + err, + ) + } + klog.Infof( + "YurtAppSet[%s/%s] templatetype %s update workload by nodepool %s success", + yas.GetNamespace(), + yas.GetName(), + templateType, + workloadmanager.GetWorkloadRefNodePool(workloadTobeUpdated), + ) + return err + }, + ) if updateErr == nil { - r.recorder.Eventf(yas.DeepCopy(), 
corev1.EventTypeNormal, fmt.Sprintf("Successful %s", eventTypeWorkloadsUpdated), "Update %d %s", updatedNum, templateType) - SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppUpdated, corev1.ConditionTrue, "", "All expected workloads are updated successfully")) + r.recorder.Eventf( + yas.DeepCopy(), + corev1.EventTypeNormal, + fmt.Sprintf("Successful %s", eventTypeWorkloadsUpdated), + "Update %d %s", + updatedNum, + templateType, + ) + SetYurtAppSetCondition( + newStatus, + NewYurtAppSetCondition( + unitv1beta1.AppSetAppUpdated, + corev1.ConditionTrue, + "", + "All expected workloads are updated successfully", + ), + ) } else { errs = append(errs, updateErr) SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppUpdated, corev1.ConditionFalse, "UpdateWorkloadError", updateErr.Error())) @@ -439,7 +602,14 @@ func (r *ReconcileYurtAppSet) conciliateWorkloads(yas *unitv1beta1.YurtAppSet, e return } -func (r *ReconcileYurtAppSet) conciliateYurtAppSet(yas *unitv1beta1.YurtAppSet, curWorkloads []metav1.Object, allRevisions []*apps.ControllerRevision, expectedRevision *appsv1.ControllerRevision, expectedNps sets.Set[string], newStatus *unitv1beta1.YurtAppSetStatus) error { +func (r *ReconcileYurtAppSet) conciliateYurtAppSet( + yas *unitv1beta1.YurtAppSet, + curWorkloads []metav1.Object, + allRevisions []*apps.ControllerRevision, + expectedRevision *appsv1.ControllerRevision, + expectedNps sets.Set[string], + newStatus *unitv1beta1.YurtAppSetStatus, +) error { if err := r.conciliateYurtAppSetStatus(yas, curWorkloads, expectedRevision, expectedNps, newStatus); err != nil { return err } @@ -447,7 +617,13 @@ func (r *ReconcileYurtAppSet) conciliateYurtAppSet(yas *unitv1beta1.YurtAppSet, } // update yas status and clean unused revisions -func (r *ReconcileYurtAppSet) conciliateYurtAppSetStatus(yas *unitv1beta1.YurtAppSet, curWorkloads []metav1.Object, expectedRevision *appsv1.ControllerRevision, expectedNps sets.Set[string], newStatus *unitv1beta1.YurtAppSetStatus) error { +func (r *ReconcileYurtAppSet) conciliateYurtAppSetStatus( + yas *unitv1beta1.YurtAppSet, + curWorkloads []metav1.Object, + expectedRevision *appsv1.ControllerRevision, + expectedNps sets.Set[string], + newStatus *unitv1beta1.YurtAppSetStatus, +) error { // calculate yas current status readyWorkloads, updatedWorkloads := 0, 0 @@ -456,7 +632,8 @@ func (r *ReconcileYurtAppSet) conciliateYurtAppSetStatus(yas *unitv1beta1.YurtAp if workloadObj.Status.ReadyReplicas == workloadObj.Status.Replicas { readyWorkloads++ } - if workloadmanager.GetWorkloadHash(workloadObj) == expectedRevision.GetName() && workloadObj.Status.UpdatedReplicas == workloadObj.Status.Replicas { + if workloadmanager.GetWorkloadHash(workloadObj) == expectedRevision.GetName() && + workloadObj.Status.UpdatedReplicas == workloadObj.Status.Replicas { updatedWorkloads++ } } @@ -467,7 +644,10 @@ func (r *ReconcileYurtAppSet) conciliateYurtAppSetStatus(yas *unitv1beta1.YurtAp newStatus.CurrentRevision = expectedRevision.GetName() if newStatus.TotalWorkloads == 0 { - SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppReady, corev1.ConditionFalse, "NoWorkloadFound", "")) + SetYurtAppSetCondition( + newStatus, + NewYurtAppSetCondition(unitv1beta1.AppSetAppReady, corev1.ConditionFalse, "NoWorkloadFound", ""), + ) } else if newStatus.TotalWorkloads == newStatus.ReadyWorkloads { SetYurtAppSetCondition(newStatus, NewYurtAppSetCondition(unitv1beta1.AppSetAppReady, corev1.ConditionTrue, "AllWorkloadsReady", 
"")) } else { @@ -483,7 +663,11 @@ func (r *ReconcileYurtAppSet) conciliateYurtAppSetStatus(yas *unitv1beta1.YurtAp oldStatus.UpdatedWorkloads == newStatus.UpdatedWorkloads && yas.Generation == newStatus.ObservedGeneration && reflect.DeepEqual(oldStatus.Conditions, newStatus.Conditions) { - klog.Infof("YurtAppSet[%s/%s] oldStatus==newStatus, no need to update status", yas.GetNamespace(), yas.GetName()) + klog.Infof( + "YurtAppSet[%s/%s] oldStatus==newStatus, no need to update status", + yas.GetNamespace(), + yas.GetName(), + ) return nil } else { klog.V(5).Infof("YurtAppSet[%s/%s] oldStatus=%+v, newStatus=%+v, need to update status", yas.GetNamespace(), yas.GetName(), oldStatus, newStatus) diff --git a/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller_test.go b/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller_test.go index c59568af5d5..72de8ad3dd4 100644 --- a/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller_test.go +++ b/pkg/yurtmanager/controller/yurtappset/yurt_app_set_controller_test.go @@ -32,6 +32,7 @@ import ( "github.com/openyurtio/openyurt/pkg/apis/apps" "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/yurtappset/workloadmanager" ) @@ -143,7 +144,12 @@ func (f *fakeEventRecorder) Event(object runtime.Object, eventtype, reason, mess func (f *fakeEventRecorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{}) { } -func (f *fakeEventRecorder) AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{}) { +func (f *fakeEventRecorder) AnnotatedEventf( + object runtime.Object, + annotations map[string]string, + eventtype, reason, messageFmt string, + args ...interface{}, +) { } func TestReconcile(t *testing.T) { @@ -189,12 +195,12 @@ func TestReconcile(t *testing.T) { }, }, npList: []client.Object{ - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-np1", }, }, - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-np2", }, @@ -279,7 +285,7 @@ func TestReconcile(t *testing.T) { }, }, npList: []client.Object{ - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-np1", }, @@ -336,7 +342,7 @@ func TestReconcile(t *testing.T) { }, }, npList: []client.Object{ - &v1beta1.NodePool{ + &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "test-np1", }, @@ -396,7 +402,11 @@ func TestReconcile(t *testing.T) { if tt.isUpdated { for _, deploy := range deployList.Items { - assert.NotEqual(t, deploy.Labels[apps.ControllerRevisionHashLabelKey], tt.yas.Status.CurrentRevision) + assert.NotEqual( + t, + deploy.Labels[apps.ControllerRevisionHashLabelKey], + tt.yas.Status.CurrentRevision, + ) } } }) diff --git a/pkg/yurtmanager/webhook/node/v1/node_default.go b/pkg/yurtmanager/webhook/node/v1/node_default.go index b680e190d25..41b245d32cd 100644 --- a/pkg/yurtmanager/webhook/node/v1/node_default.go +++ b/pkg/yurtmanager/webhook/node/v1/node_default.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/openyurtio/openyurt/pkg/apis/apps" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) @@ -47,7 +47,7 @@ func (webhook *NodeHandler) Default(ctx context.Context, obj runtime.Object) err } } - var 
np appsv1beta1.NodePool + var np appsv1beta2.NodePool if err := webhook.Client.Get(ctx, types.NamespacedName{Name: npName}, &np); err != nil { return err } diff --git a/pkg/yurtmanager/webhook/node/v1/node_default_test.go b/pkg/yurtmanager/webhook/node/v1/node_default_test.go index 8dbf43d3c7a..b74e84e99fc 100644 --- a/pkg/yurtmanager/webhook/node/v1/node_default_test.go +++ b/pkg/yurtmanager/webhook/node/v1/node_default_test.go @@ -31,14 +31,14 @@ import ( fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/openyurtio/openyurt/pkg/apis" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) func TestDefault(t *testing.T) { testcases := map[string]struct { node runtime.Object - pool *appsv1beta1.NodePool + pool *appsv1beta2.NodePool errCode int errMsg string }{ @@ -71,12 +71,12 @@ func TestDefault(t *testing.T) { }, }, }, - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "shanghai", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, HostNetwork: true, }, }, diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_default_test.go b/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_default_test.go deleted file mode 100644 index 5ec79421ed1..00000000000 --- a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_default_test.go +++ /dev/null @@ -1,120 +0,0 @@ -/* -Copyright 2023 The OpenYurt Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - "context" - "reflect" - "testing" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" -) - -func TestDefault(t *testing.T) { - testcases := map[string]struct { - obj runtime.Object - errHappened bool - wantedNodePool *v1beta1.NodePool - }{ - "it is not a nodepool": { - obj: &corev1.Pod{}, - errHappened: true, - }, - "nodepool has no type": { - obj: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: v1beta1.NodePoolSpec{ - HostNetwork: true, - }, - }, - wantedNodePool: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Labels: map[string]string{ - "nodepool.openyurt.io/type": "edge", - }, - }, - Spec: v1beta1.NodePoolSpec{ - HostNetwork: true, - Type: v1beta1.Edge, - }, - Status: v1beta1.NodePoolStatus{ - ReadyNodeNum: 0, - UnreadyNodeNum: 0, - Nodes: []string{}, - }, - }, - }, - "nodepool has pool type": { - obj: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Labels: map[string]string{ - "foo": "bar", - }, - }, - Spec: v1beta1.NodePoolSpec{ - HostNetwork: true, - Type: v1beta1.Cloud, - }, - }, - wantedNodePool: &v1beta1.NodePool{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - Labels: map[string]string{ - "foo": "bar", - "nodepool.openyurt.io/type": "cloud", - }, - }, - Spec: v1beta1.NodePoolSpec{ - HostNetwork: true, - Type: v1beta1.Cloud, - }, - Status: v1beta1.NodePoolStatus{ - ReadyNodeNum: 0, - UnreadyNodeNum: 0, - Nodes: []string{}, - }, - }, - }, - } - - for k, tc := range testcases { - t.Run(k, func(t *testing.T) { - h := NodePoolHandler{} - err := h.Default(context.TODO(), tc.obj) - if tc.errHappened { - if err == nil { - t.Errorf("expect error, got nil") - } - } else if err != nil { - t.Errorf("expect no error, but got %v", err) - } else { - currentNp := tc.obj.(*v1beta1.NodePool) - if !reflect.DeepEqual(currentNp, tc.wantedNodePool) { - t.Errorf("expect %#+v, got %#+v", tc.wantedNodePool, currentNp) - } - } - }) - } -} diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_default.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go similarity index 55% rename from pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_default.go rename to pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go index ea359fb55db..092ac1fcc31 100644 --- a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_default.go +++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1beta2 import ( "context" @@ -22,22 +22,23 @@ import ( "strings" apierrors "k8s.io/apimachinery/pkg/api/errors" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "github.com/openyurtio/openyurt/pkg/apis/apps" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" ) // Default satisfies the defaulting webhook interface. 
func (webhook *NodePoolHandler) Default(ctx context.Context, obj runtime.Object) error { - np, ok := obj.(*v1beta1.NodePool) + np, ok := obj.(*v1beta2.NodePool) if !ok { return apierrors.NewBadRequest(fmt.Sprintf("expected a NodePool but got a %T", obj)) } // specify default type as Edge if len(np.Spec.Type) == 0 { - np.Spec.Type = v1beta1.Edge + np.Spec.Type = v1beta2.Edge } if np.Labels == nil { @@ -49,11 +50,48 @@ func (webhook *NodePoolHandler) Default(ctx context.Context, obj runtime.Object) } // init node pool status - np.Status = v1beta1.NodePoolStatus{ + np.Status = v1beta2.NodePoolStatus{ ReadyNodeNum: 0, UnreadyNodeNum: 0, Nodes: make([]string, 0), } + // Set default election strategy + if np.Spec.LeaderElectionStrategy == "" { + np.Spec.LeaderElectionStrategy = string(v1beta2.ElectionStrategyRandom) + } + + // Set default PoolScopeMetadata + defaultPoolScopeMetadata := []v1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + } + + if np.Spec.PoolScopeMetadata == nil { + np.Spec.PoolScopeMetadata = defaultPoolScopeMetadata + return nil + } + + // Ensure defaultPoolScopeMetadata + // Hash existing PoolScopeMetadata + gvkMap := make(map[v1.GroupVersionKind]struct{}) + for _, m := range np.Spec.PoolScopeMetadata { + gvkMap[m] = struct{}{} + } + + // Add missing defaultPoolScopeMetadata + for _, m := range defaultPoolScopeMetadata { + if _, ok := gvkMap[m]; !ok { + np.Spec.PoolScopeMetadata = append(np.Spec.PoolScopeMetadata, m) + } + } return nil } diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go new file mode 100644 index 00000000000..42499384984 --- /dev/null +++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_default_test.go @@ -0,0 +1,445 @@ +/* +Copyright 2024 The OpenYurt Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta2 + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" +) + +func TestDefault(t *testing.T) { + testcases := map[string]struct { + obj runtime.Object + expectErr bool + wantedNodePool *v1beta2.NodePool + }{ + "it is not a nodepool": { + obj: &corev1.Pod{}, + expectErr: true, + }, + "nodepool has no type": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "nodepool.openyurt.io/type": "edge", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Edge, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyRandom), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has pool type": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyRandom), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has no leader election strategy": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: "", + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyRandom), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has mark election strategy": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ +
"foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has no pool scope metadata": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has pool scope metadata": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "Endpoints", + }, + }, + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "Endpoints", + }, + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has v1.service pool scope metadata": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + }, + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: 
"EndpointSlice", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + "nodepool has v1.EndpointSlice pool scope metadata": { + obj: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + }, + }, + }, + wantedNodePool: &v1beta2.NodePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Labels: map[string]string{ + "foo": "bar", + "nodepool.openyurt.io/type": "cloud", + }, + }, + Spec: v1beta2.NodePoolSpec{ + HostNetwork: true, + Type: v1beta2.Cloud, + LeaderElectionStrategy: string(v1beta2.ElectionStrategyMark), + PoolScopeMetadata: []metav1.GroupVersionKind{ + { + Group: "discovery.k8s.io", + Version: "v1", + Kind: "EndpointSlice", + }, + { + Group: "core", + Version: "v1", + Kind: "Service", + }, + }, + }, + Status: v1beta2.NodePoolStatus{ + ReadyNodeNum: 0, + UnreadyNodeNum: 0, + Nodes: []string{}, + }, + }, + }, + } + + for k, tc := range testcases { + t.Run(k, func(t *testing.T) { + h := NodePoolHandler{} + err := h.Default(context.TODO(), tc.obj) + if tc.expectErr { + require.Error(t, err, "expected no error") + return + } + require.NoError(t, err, "expected error") + + currentNp := tc.obj.(*v1beta2.NodePool) + assert.Equal(t, tc.wantedNodePool, currentNp) + }) + } +} diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_handler.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go similarity index 80% rename from pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_handler.go rename to pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go index 1fc444149ed..f05eb6de22b 100644 --- a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_handler.go +++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_handler.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package v1beta1 +package v1beta2 import ( ctrl "sigs.k8s.io/controller-runtime" @@ -23,7 +23,7 @@ import ( yurtClient "github.com/openyurtio/openyurt/cmd/yurt-manager/app/client" "github.com/openyurtio/openyurt/cmd/yurt-manager/names" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/util" ) @@ -32,11 +32,11 @@ func (webhook *NodePoolHandler) SetupWebhookWithManager(mgr ctrl.Manager) (strin // init webhook.Client = yurtClient.GetClientByControllerNameOrDie(mgr, names.NodePoolController) - return util.RegisterWebhook(mgr, &v1beta1.NodePool{}, webhook) + return util.RegisterWebhook(mgr, &v1beta2.NodePool{}, webhook) } -// +kubebuilder:webhook:path=/validate-apps-openyurt-io-v1beta1-nodepool,mutating=false,failurePolicy=fail,groups=apps.openyurt.io,resources=nodepools,verbs=create;update;delete,versions=v1beta1,name=v.v1beta1.nodepool.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 -// +kubebuilder:webhook:path=/mutate-apps-openyurt-io-v1beta1-nodepool,mutating=true,failurePolicy=fail,groups=apps.openyurt.io,resources=nodepools,verbs=create,versions=v1beta1,name=m.v1beta1.nodepool.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:path=/validate-apps-openyurt-io-v1beta2-nodepool,mutating=false,failurePolicy=fail,groups=apps.openyurt.io,resources=nodepools,verbs=create;update;delete,versions=v1beta2,name=v.v1beta2.nodepool.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 +// +kubebuilder:webhook:path=/mutate-apps-openyurt-io-v1beta2-nodepool,mutating=true,failurePolicy=fail,groups=apps.openyurt.io,resources=nodepools,verbs=create,versions=v1beta2,name=m.v1beta2.nodepool.kb.io,sideEffects=None,admissionReviewVersions=v1;v1beta1 // NodePoolHandler implements a validating and defaulting webhook for Cluster. type NodePoolHandler struct { diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go similarity index 68% rename from pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go rename to pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go index f7a6a440ecb..feeaa4e16cb 100644 --- a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation.go +++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1beta1 +package v1beta2 import ( "context" @@ -29,37 +29,44 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type.
func (webhook *NodePoolHandler) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { - np, ok := obj.(*appsv1beta1.NodePool) + np, ok := obj.(*appsv1beta2.NodePool) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a NodePool but got a %T", obj)) } if allErrs := validateNodePoolSpec(&np.Spec); len(allErrs) > 0 { - return nil, apierrors.NewInvalid(appsv1beta1.GroupVersion.WithKind("NodePool").GroupKind(), np.Name, allErrs) + return nil, apierrors.NewInvalid(appsv1beta2.GroupVersion.WithKind("NodePool").GroupKind(), np.Name, allErrs) } return nil, nil } // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type. -func (webhook *NodePoolHandler) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) { - newNp, ok := newObj.(*appsv1beta1.NodePool) +func (webhook *NodePoolHandler) ValidateUpdate( + ctx context.Context, + oldObj, newObj runtime.Object, +) (admission.Warnings, error) { + newNp, ok := newObj.(*appsv1beta2.NodePool) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a NodePool but got a %T", newObj)) } - oldNp, ok := oldObj.(*appsv1beta1.NodePool) + oldNp, ok := oldObj.(*appsv1beta2.NodePool) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a NodePool but got a %T", oldObj)) } if allErrs := validateNodePoolSpecUpdate(&newNp.Spec, &oldNp.Spec); len(allErrs) > 0 { - return nil, apierrors.NewForbidden(appsv1beta1.GroupVersion.WithResource("nodepools").GroupResource(), newNp.Name, allErrs[0]) + return nil, apierrors.NewForbidden( + appsv1beta2.GroupVersion.WithResource("nodepools").GroupResource(), + newNp.Name, + allErrs[0], + ) } return nil, nil @@ -67,12 +74,16 @@ func (webhook *NodePoolHandler) ValidateUpdate(ctx context.Context, oldObj, newO // ValidateDelete implements webhook.CustomValidator so a webhook will be registered for the type. func (webhook *NodePoolHandler) ValidateDelete(_ context.Context, obj runtime.Object) (admission.Warnings, error) { - np, ok := obj.(*appsv1beta1.NodePool) + np, ok := obj.(*appsv1beta2.NodePool) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a NodePool but got a %T", obj)) } if allErrs := validateNodePoolDeletion(webhook.Client, np); len(allErrs) > 0 { - return nil, apierrors.NewForbidden(appsv1beta1.GroupVersion.WithResource("nodepools").GroupResource(), np.Name, allErrs[0]) + return nil, apierrors.NewForbidden( + appsv1beta2.GroupVersion.WithResource("nodepools").GroupResource(), + np.Name, + allErrs[0], + ) } return nil, nil @@ -97,25 +108,46 @@ func validateNodePoolSpecAnnotations(annotations map[string]string) field.ErrorL } // validateNodePoolSpec validates the nodepool spec. 
-func validateNodePoolSpec(spec *appsv1beta1.NodePoolSpec) field.ErrorList { +func validateNodePoolSpec(spec *appsv1beta2.NodePoolSpec) field.ErrorList { if allErrs := validateNodePoolSpecAnnotations(spec.Annotations); allErrs != nil { return allErrs } // NodePool type should be Edge or Cloud - if spec.Type != appsv1beta1.Edge && spec.Type != appsv1beta1.Cloud { - return []*field.Error{field.Invalid(field.NewPath("spec").Child("type"), spec.Type, "pool type should be Edge or Cloud")} + if spec.Type != appsv1beta2.Edge && spec.Type != appsv1beta2.Cloud { + return []*field.Error{ + field.Invalid(field.NewPath("spec").Child("type"), spec.Type, "pool type should be Edge or Cloud"), + } } // Cloud NodePool can not set HostNetwork=true - if spec.Type == appsv1beta1.Cloud && spec.HostNetwork { - return []*field.Error{field.Invalid(field.NewPath("spec").Child("hostNetwork"), spec.HostNetwork, "Cloud NodePool cloud not support hostNetwork")} + if spec.Type == appsv1beta2.Cloud && spec.HostNetwork { + return []*field.Error{ + field.Invalid( + field.NewPath("spec").Child("hostNetwork"), + spec.HostNetwork, + "Cloud NodePool does not support hostNetwork", + ), + } + } + + // Check leader election strategy has been set to Random or Mark + switch spec.LeaderElectionStrategy { + case string(appsv1beta2.ElectionStrategyRandom), string(appsv1beta2.ElectionStrategyMark): + return nil + default: + return []*field.Error{ + field.Invalid( + field.NewPath("spec").Child("leaderElectionStrategy"), + spec.LeaderElectionStrategy, + "leaderElectionStrategy should be Random or Mark", + ), + } } - return nil } // validateNodePoolSpecUpdate tests if required fields in the NodePool spec are set. -func validateNodePoolSpecUpdate(spec, oldSpec *appsv1beta1.NodePoolSpec) field.ErrorList { +func validateNodePoolSpecUpdate(spec, oldSpec *appsv1beta2.NodePoolSpec) field.ErrorList { if allErrs := validateNodePoolSpec(spec); allErrs != nil { return allErrs } @@ -130,12 +162,22 @@ func validateNodePoolSpecUpdate(spec, oldSpec *appsv1beta1.NodePoolSpec) field.E field.Forbidden(field.NewPath("spec").Child("hostNetwork"), "pool hostNetwork can't be changed"), }) } + + if spec.InterConnectivity != oldSpec.InterConnectivity { + return field.ErrorList([]*field.Error{ + field.Forbidden( + field.NewPath("spec").Child("interConnectivity"), + "pool interConnectivity can't be changed", + ), + }) + } + return nil } // validateNodePoolDeletion validate the nodepool deletion event, which prevents // the default-nodepool from being deleted -func validateNodePoolDeletion(cli client.Client, np *appsv1beta1.NodePool) field.ErrorList { +func validateNodePoolDeletion(cli client.Client, np *appsv1beta2.NodePool) field.ErrorList { nodes := corev1.NodeList{} if err := cli.List(context.TODO(), &nodes, client.MatchingLabels(map[string]string{projectinfo.GetNodePoolLabel(): np.Name})); err != nil { diff --git a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation_test.go b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go similarity index 54% rename from pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation_test.go rename to pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go index 4e831a29fef..0be9d6e12b0 100644 --- a/pkg/yurtmanager/webhook/nodepool/v1beta1/nodepool_validation_test.go +++ b/pkg/yurtmanager/webhook/nodepool/v1beta2/nodepool_validation_test.go @@ -14,13 +14,15 @@ See the License for the specific language governing permissions and limitations under the License.
*/ -package v1beta1 +package v1beta2 import ( "context" "net/http" "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -30,7 +32,7 @@ import ( fakeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" "github.com/openyurtio/openyurt/pkg/apis" - appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) @@ -40,9 +42,10 @@ func TestValidateCreate(t *testing.T) { errcode int }{ "it is a normal nodepool": { - pool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + pool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), }, }, errcode: 0, @@ -52,9 +55,9 @@ func TestValidateCreate(t *testing.T) { errcode: http.StatusBadRequest, }, "invalid annotation": { - pool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + pool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Annotations: map[string]string{ "-&#foo": "invalid annotation", }, @@ -63,27 +66,36 @@ func TestValidateCreate(t *testing.T) { errcode: http.StatusUnprocessableEntity, }, "invalid pool type": { - pool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + pool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Type: "invalid type", }, }, errcode: http.StatusUnprocessableEntity, }, + "invalid leader election strategy": { + pool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + LeaderElectionStrategy: "invalid strategy", + }, + }, + errcode: http.StatusUnprocessableEntity, + }, } handler := &NodePoolHandler{} for k, tc := range testcases { t.Run(k, func(t *testing.T) { _, err := handler.ValidateCreate(context.TODO(), tc.pool) - if tc.errcode == 0 && err != nil { - t.Errorf("Expected error code %d, got %v", tc.errcode, err) - } else if tc.errcode != 0 { - statusErr := err.(*errors.StatusError) - if tc.errcode != int(statusErr.Status().Code) { - t.Errorf("Expected error code %d, got %v", tc.errcode, err) - } + if tc.errcode == 0 { + require.NoError(t, err, "Expected error code %d, got %v", tc.errcode, err) + return } + require.Error(t, err, "Expected error code %d, got %v", tc.errcode, err) + + statusErr := err.(*errors.StatusError) + assert.Equal(t, tc.errcode, int(statusErr.Status().Code), "Expected error code %d, got %v", tc.errcode, err) }) } } @@ -95,86 +107,119 @@ func TestValidateUpdate(t *testing.T) { errcode int }{ "update a normal nodepool": { - oldPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + oldPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom), }, }, - newPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + newPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "foo": "bar", }, + LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyMark), }, }, errcode: 0, }, "oldPool is not a nodepool": { oldPool: &corev1.Node{}, - newPool: &appsv1beta1.NodePool{}, + newPool: &appsv1beta2.NodePool{}, errcode: http.StatusBadRequest, }, "newPool is not a 
nodepool": { - oldPool: &appsv1beta1.NodePool{}, + oldPool: &appsv1beta2.NodePool{}, newPool: &corev1.Node{}, errcode: http.StatusBadRequest, }, "invalid pool type": { - oldPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + oldPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, }, }, - newPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ + newPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ Type: "invalid type", }, }, errcode: http.StatusForbidden, }, "type is changed": { - oldPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + oldPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, }, }, - newPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Cloud, + newPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Cloud, }, }, errcode: http.StatusForbidden, }, "host network is changed": { - oldPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + oldPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, HostNetwork: false, }, }, - newPool: &appsv1beta1.NodePool{ - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + newPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, HostNetwork: true, }, }, errcode: http.StatusForbidden, }, + "interConnectivity is changed": { + oldPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + HostNetwork: false, + }, + }, + newPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + HostNetwork: true, + }, + }, + errcode: http.StatusForbidden, + }, + "leaderElectionStrategy is changed": { + oldPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + HostNetwork: false, + LeaderElectionStrategy: "mark", + }, + }, + newPool: &appsv1beta2.NodePool{ + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, + HostNetwork: false, + LeaderElectionStrategy: "random", + }, + }, + errcode: 0, + }, } handler := &NodePoolHandler{} for k, tc := range testcases { t.Run(k, func(t *testing.T) { _, err := handler.ValidateUpdate(context.TODO(), tc.oldPool, tc.newPool) - if tc.errcode == 0 && err != nil { - t.Errorf("Expected error code %d, got %v", tc.errcode, err) - } else if tc.errcode != 0 { - statusErr := err.(*errors.StatusError) - if tc.errcode != int(statusErr.Status().Code) { - t.Errorf("Expected error code %d, got %v", tc.errcode, err) - } + if tc.errcode == 0 { + require.NoError(t, err, "Expected error code %d, got %v", tc.errcode, err) + return } + require.Error(t, err) + statusErr := err.(*errors.StatusError) + assert.Equal(t, tc.errcode, int(statusErr.Status().Code), "Expected error code %d, got %v", tc.errcode, err) }) } } @@ -203,23 +248,23 @@ func prepareNodes() []client.Object { func prepareNodePools() []client.Object { pools := []client.Object{ - &appsv1beta1.NodePool{ + &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "hangzhou", }, }, }, - &appsv1beta1.NodePool{ + &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "beijing", }, - Spec: appsv1beta1.NodePoolSpec{ - Type: appsv1beta1.Edge, + Spec: appsv1beta2.NodePoolSpec{ + 
Type: appsv1beta2.Edge, Labels: map[string]string{ "region": "beijing", }, @@ -245,7 +290,7 @@ func TestValidateDelete(t *testing.T) { errcode int }{ "delete a empty nodepool": { - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "beijing", }, @@ -253,7 +298,7 @@ func TestValidateDelete(t *testing.T) { errcode: 0, }, "delete a nodepool with node in it": { - pool: &appsv1beta1.NodePool{ + pool: &appsv1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: "hangzhou", }, @@ -272,14 +317,13 @@ func TestValidateDelete(t *testing.T) { for k, tc := range testcases { t.Run(k, func(t *testing.T) { _, err := handler.ValidateDelete(context.TODO(), tc.pool) - if tc.errcode == 0 && err != nil { - t.Errorf("Expected error code %d, got %v", tc.errcode, err) - } else if tc.errcode != 0 { - statusErr := err.(*errors.StatusError) - if tc.errcode != int(statusErr.Status().Code) { - t.Errorf("Expected error code %d, got %v", tc.errcode, err) - } + if tc.errcode == 0 { + require.NoError(t, err, "Expected error code %d, got %v", tc.errcode, err) + return } + require.Error(t, err) + statusErr := err.(*errors.StatusError) + assert.Equal(t, tc.errcode, int(statusErr.Status().Code), "Expected error code %d, got %v", tc.errcode, err) }) } } diff --git a/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation.go b/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation.go index 55bbd424e6d..bf3dffc33b1 100644 --- a/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation.go +++ b/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation.go @@ -27,14 +27,17 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - unitv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + unitv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/apis/iot/v1alpha2" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/platformadmin/config" util "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/platformadmin/utils" ) // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type. -func (webhook *PlatformAdminHandler) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { +func (webhook *PlatformAdminHandler) ValidateCreate( + ctx context.Context, + obj runtime.Object, +) (admission.Warnings, error) { platformAdmin, ok := obj.(*v1alpha2.PlatformAdmin) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a PlatformAdmin but got a %T", obj)) @@ -42,14 +45,21 @@ func (webhook *PlatformAdminHandler) ValidateCreate(ctx context.Context, obj run //validate if allErrs := webhook.validate(ctx, platformAdmin); len(allErrs) > 0 { - return nil, apierrors.NewInvalid(v1alpha2.GroupVersion.WithKind("PlatformAdmin").GroupKind(), platformAdmin.Name, allErrs) + return nil, apierrors.NewInvalid( + v1alpha2.GroupVersion.WithKind("PlatformAdmin").GroupKind(), + platformAdmin.Name, + allErrs, + ) } return nil, nil } // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type. 
-func (webhook *PlatformAdminHandler) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+func (webhook *PlatformAdminHandler) ValidateUpdate(
+	ctx context.Context,
+	oldObj, newObj runtime.Object,
+) (admission.Warnings, error) {
 	newPlatformAdmin, ok := newObj.(*v1alpha2.PlatformAdmin)
 	if !ok {
 		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a PlatformAdmin but got a %T", newObj))
@@ -63,7 +73,11 @@ func (webhook *PlatformAdminHandler) ValidateUpdate(ctx context.Context, oldObj,
 	newErrorList := webhook.validate(ctx, newPlatformAdmin)
 	oldErrorList := webhook.validate(ctx, oldPlatformAdmin)
 	if allErrs := append(newErrorList, oldErrorList...); len(allErrs) > 0 {
-		return nil, apierrors.NewInvalid(v1alpha2.GroupVersion.WithKind("PlatformAdmin").GroupKind(), newPlatformAdmin.Name, allErrs)
+		return nil, apierrors.NewInvalid(
+			v1alpha2.GroupVersion.WithKind("PlatformAdmin").GroupKind(),
+			newPlatformAdmin.Name,
+			allErrs,
+		)
 	}
 	return nil, nil
 }
@@ -73,7 +87,10 @@ func (webhook *PlatformAdminHandler) ValidateDelete(_ context.Context, obj runti
 	return nil, nil
 }

-func (webhook *PlatformAdminHandler) validate(ctx context.Context, platformAdmin *v1alpha2.PlatformAdmin) field.ErrorList {
+func (webhook *PlatformAdminHandler) validate(
+	ctx context.Context,
+	platformAdmin *v1alpha2.PlatformAdmin,
+) field.ErrorList {
 	// verify the version
 	if specErrs := webhook.validatePlatformAdminSpec(platformAdmin); specErrs != nil {
@@ -91,7 +108,13 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminSpec(platformAdmin *v1
 	// Verify that the platform is supported
 	if platformAdmin.Spec.Platform != v1alpha2.PlatformAdminPlatformEdgeX {
-		return field.ErrorList{field.Invalid(field.NewPath("spec", "platform"), platformAdmin.Spec.Platform, "must be "+v1alpha2.PlatformAdminPlatformEdgeX)}
+		return field.ErrorList{
+			field.Invalid(
+				field.NewPath("spec", "platform"),
+				platformAdmin.Spec.Platform,
+				"must be "+v1alpha2.PlatformAdminPlatformEdgeX,
+			),
+		}
 	}

 	// Verify that it is a supported platformadmin version
@@ -102,16 +125,27 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminSpec(platformAdmin *v1
 	}

 	return field.ErrorList{
-		field.Invalid(field.NewPath("spec", "version"), platformAdmin.Spec.Version, "must be one of"+strings.Join(config.ExtractVersionsName(webhook.Manifests).UnsortedList(), ",")),
+		field.Invalid(
+			field.NewPath("spec", "version"),
+			platformAdmin.Spec.Version,
+			"must be one of "+strings.Join(config.ExtractVersionsName(webhook.Manifests).UnsortedList(), ","),
+		),
 	}
 }

-func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(ctx context.Context, platformAdmin *v1alpha2.PlatformAdmin) field.ErrorList {
+func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(
+	ctx context.Context,
+	platformAdmin *v1alpha2.PlatformAdmin,
+) field.ErrorList {
 	// verify that the poolname is a right nodepool name
-	nodePools := &unitv1beta1.NodePoolList{}
+	nodePools := &unitv1beta2.NodePoolList{}
 	if err := webhook.Client.List(ctx, nodePools); err != nil {
 		return field.ErrorList{
-			field.Invalid(field.NewPath("spec", "poolName"), platformAdmin.Spec.PoolName, "can not list nodepools, cause"+err.Error()),
+			field.Invalid(
+				field.NewPath("spec", "poolName"),
+				platformAdmin.Spec.PoolName,
+				"can not list nodepools, cause "+err.Error(),
+			),
 		}
 	}
 	ok := false
@@ -131,13 +165,21 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(ctx cont
 	listOptions := 
client.MatchingFields{util.IndexerPathForNodepool: platformAdmin.Spec.PoolName}
 	if err := webhook.Client.List(ctx, &platformadmins, listOptions); err != nil {
 		return field.ErrorList{
-			field.Invalid(field.NewPath("spec", "poolName"), platformAdmin.Spec.PoolName, "can not list platformadmins, cause "+err.Error()),
+			field.Invalid(
+				field.NewPath("spec", "poolName"),
+				platformAdmin.Spec.PoolName,
+				"can not list platformadmins, cause "+err.Error(),
+			),
 		}
 	}
 	for _, other := range platformadmins.Items {
 		if platformAdmin.Name != other.Name {
 			return field.ErrorList{
-				field.Invalid(field.NewPath("spec", "poolName"), platformAdmin.Spec.PoolName, "already used by other platformadmin instance,"),
+				field.Invalid(
+					field.NewPath("spec", "poolName"),
+					platformAdmin.Spec.PoolName,
+					"already used by other platformadmin instance",
+				),
 			}
 		}
 	}
diff --git a/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation_test.go b/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation_test.go
index e176a45379a..e9841b515fb 100644
--- a/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation_test.go
+++ b/pkg/yurtmanager/webhook/platformadmin/v1alpha2/platformadmin_validation_test.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"

 	"github.com/openyurtio/openyurt/pkg/apis"
-	ut "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1"
+	ut "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
 	"github.com/openyurtio/openyurt/pkg/apis/iot/v1alpha2"
 	version "github.com/openyurtio/openyurt/pkg/apis/iot/v1alpha2"
 	"github.com/openyurtio/openyurt/pkg/yurtmanager/controller/platformadmin/config"
@@ -116,8 +116,11 @@ func TestValidateCreate(t *testing.T) {
 			errCode: http.StatusUnprocessableEntity,
 		},
 		{
-			name:   "should get StatusUnprocessableEntityError when list PlatformAdmin failed",
-			client: NewFakeClient(buildClient(buildNodePool(), buildPlatformAdmin())).WithErr(&v1alpha2.PlatformAdminList{}, errors.New("list failed")).Build(),
+			name: "should get StatusUnprocessableEntityError when list PlatformAdmin failed",
+			client: NewFakeClient(
+				buildClient(buildNodePool(), buildPlatformAdmin()),
+			).WithErr(&v1alpha2.PlatformAdminList{}, errors.New("list failed")).
+				Build(),
 			obj: &v1alpha2.PlatformAdmin{
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec: v1alpha2.PlatformAdminSpec{
@@ -305,7 +308,12 @@ func buildClient(nodePools []client.Object, platformAdmin []client.Object) clien
 	_ = clientgoscheme.AddToScheme(scheme)
 	_ = apis.AddToScheme(scheme)
 	_ = version.SchemeBuilder.AddToScheme(scheme)
-	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(nodePools...).WithObjects(platformAdmin...).WithIndex(&v1alpha2.PlatformAdmin{}, "spec.poolName", Indexer).Build()
+	return fake.NewClientBuilder().
+		WithScheme(scheme).
+		WithObjects(nodePools...).
+		WithObjects(platformAdmin...).
+		WithIndex(&v1alpha2.PlatformAdmin{}, "spec.poolName", Indexer).
+ Build() } func buildPlatformAdmin() []client.Object { diff --git a/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation.go b/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation.go index 718f4b44b24..ebf244f360a 100644 --- a/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation.go +++ b/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation.go @@ -26,14 +26,17 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" - unitv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + unitv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/apis/iot/v1beta1" "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/platformadmin/config" util "github.com/openyurtio/openyurt/pkg/yurtmanager/controller/platformadmin/utils" ) // ValidateCreate implements webhook.CustomValidator so a webhook will be registered for the type. -func (webhook *PlatformAdminHandler) ValidateCreate(ctx context.Context, obj runtime.Object) (admission.Warnings, error) { +func (webhook *PlatformAdminHandler) ValidateCreate( + ctx context.Context, + obj runtime.Object, +) (admission.Warnings, error) { platformAdmin, ok := obj.(*v1beta1.PlatformAdmin) if !ok { return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a PlatformAdmin but got a %T", obj)) @@ -41,14 +44,21 @@ func (webhook *PlatformAdminHandler) ValidateCreate(ctx context.Context, obj run //validate if allErrs := webhook.validate(ctx, platformAdmin); len(allErrs) > 0 { - return nil, apierrors.NewInvalid(v1beta1.GroupVersion.WithKind("PlatformAdmin").GroupKind(), platformAdmin.Name, allErrs) + return nil, apierrors.NewInvalid( + v1beta1.GroupVersion.WithKind("PlatformAdmin").GroupKind(), + platformAdmin.Name, + allErrs, + ) } return nil, nil } // ValidateUpdate implements webhook.CustomValidator so a webhook will be registered for the type. 
-func (webhook *PlatformAdminHandler) ValidateUpdate(ctx context.Context, oldObj, newObj runtime.Object) (admission.Warnings, error) {
+func (webhook *PlatformAdminHandler) ValidateUpdate(
+	ctx context.Context,
+	oldObj, newObj runtime.Object,
+) (admission.Warnings, error) {
 	newPlatformAdmin, ok := newObj.(*v1beta1.PlatformAdmin)
 	if !ok {
 		return nil, apierrors.NewBadRequest(fmt.Sprintf("expected a PlatformAdmin but got a %T", newObj))
@@ -62,7 +72,11 @@ func (webhook *PlatformAdminHandler) ValidateUpdate(ctx context.Context, oldObj,
 	newErrorList := webhook.validate(ctx, newPlatformAdmin)
 	oldErrorList := webhook.validate(ctx, oldPlatformAdmin)
 	if allErrs := append(newErrorList, oldErrorList...); len(allErrs) > 0 {
-		return nil, apierrors.NewInvalid(v1beta1.GroupVersion.WithKind("PlatformAdmin").GroupKind(), newPlatformAdmin.Name, allErrs)
+		return nil, apierrors.NewInvalid(
+			v1beta1.GroupVersion.WithKind("PlatformAdmin").GroupKind(),
+			newPlatformAdmin.Name,
+			allErrs,
+		)
 	}
 	return nil, nil
 }
@@ -72,7 +86,10 @@ func (webhook *PlatformAdminHandler) ValidateDelete(_ context.Context, obj runti
 	return nil, nil
 }

-func (webhook *PlatformAdminHandler) validate(ctx context.Context, platformAdmin *v1beta1.PlatformAdmin) field.ErrorList {
+func (webhook *PlatformAdminHandler) validate(
+	ctx context.Context,
+	platformAdmin *v1beta1.PlatformAdmin,
+) field.ErrorList {
 	// verify the version
 	if specErrs := webhook.validatePlatformAdminSpec(platformAdmin); specErrs != nil {
 		return specErrs
@@ -90,7 +107,13 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminSpec(platformAdmin *v1
 	// Verify that the platform is supported
 	if platformAdmin.Spec.Platform != v1beta1.PlatformAdminPlatformEdgeX {
-		return field.ErrorList{field.Invalid(field.NewPath("spec", "platform"), platformAdmin.Spec.Platform, "must be "+v1beta1.PlatformAdminPlatformEdgeX)}
+		return field.ErrorList{
+			field.Invalid(
+				field.NewPath("spec", "platform"),
+				platformAdmin.Spec.Platform,
+				"must be "+v1beta1.PlatformAdminPlatformEdgeX,
+			),
+		}
 	}

 	// Verify that it is a supported platformadmin version
@@ -101,16 +124,27 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminSpec(platformAdmin *v1
 	}

 	return field.ErrorList{
-		field.Invalid(field.NewPath("spec", "version"), platformAdmin.Spec.Version, "must be one of"+strings.Join(config.ExtractVersionsName(webhook.Manifests).UnsortedList(), ",")),
+		field.Invalid(
+			field.NewPath("spec", "version"),
+			platformAdmin.Spec.Version,
+			"must be one of "+strings.Join(config.ExtractVersionsName(webhook.Manifests).UnsortedList(), ","),
+		),
 	}
 }

-func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(ctx context.Context, platformAdmin *v1beta1.PlatformAdmin) field.ErrorList {
+func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(
+	ctx context.Context,
+	platformAdmin *v1beta1.PlatformAdmin,
+) field.ErrorList {
 	// verify that the poolnames are right nodepool names
-	nodepools := &unitv1beta1.NodePoolList{}
+	nodepools := &unitv1beta2.NodePoolList{}
 	if err := webhook.Client.List(ctx, nodepools); err != nil {
 		return field.ErrorList{
-			field.Invalid(field.NewPath("spec", "nodepools"), platformAdmin.Spec.NodePools, "can not list nodepools, cause"+err.Error()),
+			field.Invalid(
+				field.NewPath("spec", "nodepools"),
+				platformAdmin.Spec.NodePools,
+				"can not list nodepools, cause "+err.Error(),
+			),
 		}
 	}

@@ -135,7 +169,11 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(ctx cont
 	var platformadmins v1beta1.PlatformAdminList
 	if err := 
webhook.Client.List(ctx, &platformadmins); err != nil {
 		return field.ErrorList{
-			field.Invalid(field.NewPath("spec", "nodepools"), platformAdmin.Spec.NodePools, "can not list platformadmins, cause"+err.Error()),
+			field.Invalid(
+				field.NewPath("spec", "nodepools"),
+				platformAdmin.Spec.NodePools,
+				"can not list platformadmins, cause "+err.Error(),
+			),
 		}
 	}

@@ -144,7 +182,11 @@ func (webhook *PlatformAdminHandler) validatePlatformAdminWithNodePools(ctx cont
 		for _, poolName := range platformAdmin.Spec.NodePools {
 			if util.Contains(other.Spec.NodePools, poolName) {
 				return field.ErrorList{
-					field.Invalid(field.NewPath("spec", "nodepools"), poolName, "already used by other platformadmin instance"),
+					field.Invalid(
+						field.NewPath("spec", "nodepools"),
+						poolName,
+						"already used by other platformadmin instance",
+					),
 				}
 			}
 		}
diff --git a/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation_test.go b/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation_test.go
index 097ac7be9aa..2aa535a60ba 100644
--- a/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation_test.go
+++ b/pkg/yurtmanager/webhook/platformadmin/v1beta1/platformadmin_validation_test.go
@@ -33,7 +33,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client/fake"

 	"github.com/openyurtio/openyurt/pkg/apis"
-	ut "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1"
+	ut "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
 	"github.com/openyurtio/openyurt/pkg/apis/iot/v1beta1"
 	"github.com/openyurtio/openyurt/pkg/yurtmanager/controller/platformadmin/config"
 )
@@ -115,8 +115,11 @@ func TestValidateCreate(t *testing.T) {
 			errCode: http.StatusUnprocessableEntity,
 		},
 		{
-			name:   "should get StatusUnprocessableEntityError when list PlatformAdmin failed",
-			client: NewFakeClient(buildClient(buildNodePool(), buildPlatformAdmin())).WithErr(&v1beta1.PlatformAdminList{}, errors.New("list failed")).Build(),
+			name: "should get StatusUnprocessableEntityError when list PlatformAdmin failed",
+			client: NewFakeClient(
+				buildClient(buildNodePool(), buildPlatformAdmin()),
+			).WithErr(&v1beta1.PlatformAdminList{}, errors.New("list failed")).
+				Build(),
 			obj: &v1beta1.PlatformAdmin{
 				ObjectMeta: metav1.ObjectMeta{},
 				Spec: v1beta1.PlatformAdminSpec{
@@ -303,7 +306,12 @@ func buildClient(nodePools []client.Object, platformAdmin []client.Object) clien
 	scheme := runtime.NewScheme()
 	_ = clientgoscheme.AddToScheme(scheme)
 	_ = apis.AddToScheme(scheme)
-	return fake.NewClientBuilder().WithScheme(scheme).WithObjects(nodePools...).WithObjects(platformAdmin...).WithIndex(&v1beta1.PlatformAdmin{}, "spec.nodepools", Indexer).Build()
+	return fake.NewClientBuilder().
+		WithScheme(scheme).
+		WithObjects(nodePools...).
+		WithObjects(platformAdmin...).
+		WithIndex(&v1beta1.PlatformAdmin{}, "spec.nodepools", Indexer).
+ Build() } func buildPlatformAdmin() []client.Object { diff --git a/pkg/yurtmanager/webhook/server.go b/pkg/yurtmanager/webhook/server.go index 80d24479e75..1be0c7c693b 100644 --- a/pkg/yurtmanager/webhook/server.go +++ b/pkg/yurtmanager/webhook/server.go @@ -35,7 +35,7 @@ import ( v1endpointslice "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/endpointslice/v1" v1beta1gateway "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/gateway/v1beta1" v1node "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/node/v1" - v1beta1nodepool "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/nodepool/v1beta1" + v1beta2nodepool "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/nodepool/v1beta2" v1beta1platformadmin "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/platformadmin/v1beta1" v1alpha1pod "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/pod/v1alpha1" "github.com/openyurtio/openyurt/pkg/yurtmanager/webhook/util" @@ -73,7 +73,7 @@ func addControllerWebhook(name string, handler SetupWebhookWithManager) { func init() { addControllerWebhook(names.GatewayPickupController, &v1beta1gateway.GatewayHandler{}) - addControllerWebhook(names.NodePoolController, &v1beta1nodepool.NodePoolHandler{}) + addControllerWebhook(names.NodePoolController, &v1beta2nodepool.NodePoolHandler{}) addControllerWebhook(names.YurtStaticSetController, &v1alpha1yurtstaticset.YurtStaticSetHandler{}) addControllerWebhook(names.YurtAppSetController, &v1beta1yurtappset.YurtAppSetHandler{}) addControllerWebhook(names.YurtAppDaemonController, &v1alpha1yurtappdaemon.YurtAppDaemonHandler{}) @@ -133,7 +133,11 @@ func SetupWithManager(c *config.CompletedConfig, mgr manager.Manager) error { // set up controller webhooks for controllerName, list := range controllerWebhooks { - if !app.IsControllerEnabled(controllerName, controller.ControllersDisabledByDefault, c.ComponentConfig.Generic.Controllers) { + if !app.IsControllerEnabled( + controllerName, + controller.ControllersDisabledByDefault, + c.ComponentConfig.Generic.Controllers, + ) { klog.Warningf("Webhook for %v is disabled", controllerName) continue } diff --git a/test/e2e/util/nodepool.go b/test/e2e/util/nodepool.go index 066c7337bfd..25a56c9464b 100644 --- a/test/e2e/util/nodepool.go +++ b/test/e2e/util/nodepool.go @@ -25,12 +25,12 @@ import ( "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" "github.com/openyurtio/openyurt/pkg/projectinfo" ) func CleanupNodePool(ctx context.Context, k8sClient client.Client) error { - nps := &v1beta1.NodePoolList{} + nps := &v1beta2.NodePoolList{} if err := k8sClient.List(ctx, nps); err != nil { return err } @@ -68,7 +68,11 @@ func CleanupNodePoolLabel(ctx context.Context, k8sClient client.Client) error { return nil } -func InitNodeAndNodePool(ctx context.Context, k8sClient client.Client, poolToNodesMap map[string]sets.Set[string]) error { +func InitNodeAndNodePool( + ctx context.Context, + k8sClient client.Client, + poolToNodesMap map[string]sets.Set[string], +) error { nodeToPoolMap := make(map[string]string) for k, v := range poolToNodesMap { for _, n := range sets.List(v) { @@ -77,12 +81,12 @@ func InitNodeAndNodePool(ctx context.Context, k8sClient client.Client, poolToNod } for k := range poolToNodesMap { - if err := k8sClient.Create(ctx, &v1beta1.NodePool{ + if err := k8sClient.Create(ctx, &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: k, }, - Spec: 
v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }}); err != nil { return err } @@ -120,18 +124,18 @@ const ( // PrepareNodePoolWithNode will create a edge nodepool named "nodepool-with-node" and add the "openyurt-e2e-test-worker" node to this nodepool. // In order for Pods to be successfully deployed in e2e tests, a nodepool with nodes needs to be created func PrepareNodePoolWithNode(ctx context.Context, k8sClient client.Client, nodeName string) error { - if err := k8sClient.Get(ctx, client.ObjectKey{Name: NodePoolName}, &v1beta1.NodePool{}); err == nil { + if err := k8sClient.Get(ctx, client.ObjectKey{Name: NodePoolName}, &v1beta2.NodePool{}); err == nil { return nil } else if !errors.IsNotFound(err) { return err } - if err := k8sClient.Create(ctx, &v1beta1.NodePool{ + if err := k8sClient.Create(ctx, &v1beta2.NodePool{ ObjectMeta: metav1.ObjectMeta{ Name: NodePoolName, }, - Spec: v1beta1.NodePoolSpec{ - Type: v1beta1.Edge, + Spec: v1beta2.NodePoolSpec{ + Type: v1beta2.Edge, }}); err != nil { return err } diff --git a/test/e2e/util/util.go b/test/e2e/util/util.go index fbb2cc153d1..21a46dd6b09 100644 --- a/test/e2e/util/util.go +++ b/test/e2e/util/util.go @@ -35,6 +35,7 @@ import ( appsv1alpha1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1alpha1" appsv1beta1 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" + appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" iotv1alpha2 "github.com/openyurtio/openyurt/pkg/apis/iot/v1alpha2" iotv1beta1 "github.com/openyurtio/openyurt/pkg/apis/iot/v1beta1" "github.com/openyurtio/openyurt/test/e2e/yurtconfig" @@ -48,6 +49,7 @@ func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) utilruntime.Must(appsv1alpha1.AddToScheme(scheme)) utilruntime.Must(appsv1beta1.AddToScheme(scheme)) + utilruntime.Must(appsv1beta2.AddToScheme(scheme)) utilruntime.Must(iotv1alpha2.AddToScheme(scheme)) utilruntime.Must(iotv1beta1.AddToScheme(scheme)) } @@ -60,13 +62,22 @@ const ( PodStartTimeout = 5 * time.Minute ) -var EnableYurtAutonomy = flag.Bool("enable-yurt-autonomy", false, "switch of yurt node autonomy. If set to true, yurt node autonomy test can be run normally") +var EnableYurtAutonomy = flag.Bool( + "enable-yurt-autonomy", + false, + "switch of yurt node autonomy. If set to true, yurt node autonomy test can be run normally", +) var RegionID = flag.String("region-id", "", "aliyun region id for ailunyun:ecs/ens") var NodeType = flag.String("node-type", "minikube", "node type such as ailunyun:ecs/ens, minikube and user_self") var AccessKeyID = flag.String("access-key-id", "", "aliyun AccessKeyId for ailunyun:ecs/ens") var AccessKeySecret = flag.String("access-key-secret", "", "aliyun AccessKeySecret for ailunyun:ecs/ens") var Kubeconfig = flag.String("kubeconfig", "", "kubeconfig file path for OpenYurt cluster") -var ReportDir = flag.String("report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.") + +var ReportDir = flag.String( + "report-dir", + "", + "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.", +) // LoadRestConfigAndClientset returns rest config and clientset for connecting to kubernetes clusters. 
func LoadRestConfigAndClientset(kubeconfig string) (*restclient.Config, *clientset.Clientset, error) { diff --git a/test/e2e/yurt/nodepool.go b/test/e2e/yurt/nodepool.go index 3d835fc320e..109beec7483 100644 --- a/test/e2e/yurt/nodepool.go +++ b/test/e2e/yurt/nodepool.go @@ -28,7 +28,7 @@ package yurt // "k8s.io/apimachinery/pkg/util/sets" // runtimeclient "sigs.k8s.io/controller-runtime/pkg/client" // -// "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta1" +// "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2" // "github.com/openyurtio/openyurt/test/e2e/util" // ycfg "github.com/openyurtio/openyurt/test/e2e/yurtconfig" //) @@ -39,7 +39,7 @@ package yurt // poolToNodesMap := make(map[string]sets.String) // // checkNodePoolStatus := func(poolToNodesMap map[string]sets.String) error { -// nps := &v1beta1.NodePoolList{} +// nps := &v1beta2.NodePoolList{} // if err := k8sClient.List(ctx, nps); err != nil { // return err // }
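For readers tracking the v1beta1 -> v1beta2 migration above, the following standalone sketch shows the NodePool surface these webhooks now validate. It is illustrative only and not part of the patch: the main() wiring and the "hangzhou" pool name are assumptions, while the types, fields, constants, and the allowed/forbidden update rules are taken from the diff and its tests.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	appsv1beta2 "github.com/openyurtio/openyurt/pkg/apis/apps/v1beta2"
)

func main() {
	// A pool the webhook accepts on create: a valid pool type plus a known
	// leader election strategy ("random" or "mark", per the create tests).
	pool := &appsv1beta2.NodePool{
		ObjectMeta: metav1.ObjectMeta{Name: "hangzhou"},
		Spec: appsv1beta2.NodePoolSpec{
			Type:                   appsv1beta2.Edge,
			LeaderElectionStrategy: string(appsv1beta2.ElectionStrategyRandom),
		},
	}

	// Per the ValidateUpdate tests: LeaderElectionStrategy may change after
	// creation, while Type, HostNetwork, and InterConnectivity may not
	// (the webhook answers http.StatusForbidden for those).
	pool.Spec.LeaderElectionStrategy = "mark" // allowed update

	fmt.Printf("pool %s: type=%s strategy=%s\n",
		pool.Name, pool.Spec.Type, pool.Spec.LeaderElectionStrategy)
}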