diff --git a/Makefile b/Makefile index 41757a0b..680ab382 100644 --- a/Makefile +++ b/Makefile @@ -289,6 +289,7 @@ generate-e2e-templates: $(KUSTOMIZE) $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-externally-managed-vcn --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-externally-managed-vcn.yaml $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-machine-pool --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-machine-pool.yaml $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-managed --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-managed.yaml + $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-node-recycling --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-node-recycling.yaml $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-cluster-identity --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-cluster-identity.yaml $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-cluster-identity --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-cluster-identity.yaml $(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-windows-calico --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-windows-calico.yaml diff --git a/cloud/scope/managed_control_plane.go b/cloud/scope/managed_control_plane.go index ef55eba5..5bf63f1c 100644 --- a/cloud/scope/managed_control_plane.go +++ b/cloud/scope/managed_control_plane.go @@ -588,6 +588,9 @@ func (s *ManagedControlPlaneScope) UpdateControlPlane(ctx context.Context, okeCl // there is a chance user will edit the cluster func setControlPlaneSpecDefaults(spec *infrav2exp.OCIManagedControlPlaneSpec) { spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{} + if spec.ClusterType == "" { + spec.ClusterType = infrav2exp.BasicClusterType + } if spec.ImagePolicyConfig == nil { spec.ImagePolicyConfig = &infrav2exp.ImagePolicyConfig{ IsPolicyEnabled: common.Bool(false), @@ -663,6 +666,7 @@ func (s *ManagedControlPlaneScope) getSpecFromActual(cluster *oke.Cluster) *infr spec.ClusterType = infrav2exp.EnhancedClusterType break default: + spec.ClusterType = infrav2exp.BasicClusterType break } } diff --git a/cloud/scope/managed_control_plane_test.go b/cloud/scope/managed_control_plane_test.go index 1509b982..4f9d985a 100644 --- a/cloud/scope/managed_control_plane_test.go +++ b/cloud/scope/managed_control_plane_test.go @@ -473,6 +473,7 @@ func TestControlPlaneUpdation(t *testing.T) { CompartmentId: common.String("test-compartment"), VcnId: common.String("vcn-id"), KubernetesVersion: common.String("v1.24.5"), + Type: oke.ClusterTypeBasicCluster, FreeformTags: tags, DefinedTags: definedTagsInterface, EndpointConfig: &oke.ClusterEndpointConfig{ diff --git a/cloud/scope/managed_machine_pool.go b/cloud/scope/managed_machine_pool.go index 1345fdd8..6221cfe0 100644 --- a/cloud/scope/managed_machine_pool.go +++ b/cloud/scope/managed_machine_pool.go @@ -313,6 +313,14 @@ func (m *ManagedMachinePoolScope) CreateNodePool(ctx context.Context) (*oke.Node IsForceDeleteAfterGraceDuration: m.OCIManagedMachinePool.Spec.NodeEvictionNodePoolSettings.IsForceDeleteAfterGraceDuration, } } + recycleConfig := m.OCIManagedMachinePool.Spec.NodePoolCyclingDetails + if recycleConfig != nil { + nodePoolDetails.NodePoolCyclingDetails = &oke.NodePoolCyclingDetails{ + IsNodeCyclingEnabled: 
recycleConfig.IsNodeCyclingEnabled, + MaximumSurge: recycleConfig.MaximumSurge, + MaximumUnavailable: recycleConfig.MaximumUnavailable, + } + } nodePoolDetails.InitialNodeLabels = m.getInitialNodeKeyValuePairs() req := oke.CreateNodePoolRequest{ @@ -603,16 +611,22 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke. return false, err } m.Logger.Info("Node pool", "spec", jsonSpec, "actual", jsonActual) - placementConfig, err := m.buildPlacementConfig(spec.NodePoolNodeConfig.PlacementConfigs) - if err != nil { - return false, err - } + nodeConfigDetails := oke.UpdateNodePoolNodeConfigDetails{ NsgIds: m.getWorkerMachineNSGs(), - PlacementConfigs: placementConfig, IsPvEncryptionInTransitEnabled: spec.NodePoolNodeConfig.IsPvEncryptionInTransitEnabled, KmsKeyId: spec.NodePoolNodeConfig.KmsKeyId, } + // send placement config only if there is an actual change in placement + // placement config and recycle config cannot be sent at the same time, and most use cases will + // be to update kubernetes version in which case, placement config is not required to be sent + if !reflect.DeepEqual(spec.NodePoolNodeConfig.PlacementConfigs, actual.NodePoolNodeConfig.PlacementConfigs) { + placementConfig, err := m.buildPlacementConfig(spec.NodePoolNodeConfig.PlacementConfigs) + if err != nil { + return false, err + } + nodeConfigDetails.PlacementConfigs = placementConfig + } if nodePoolSizeUpdateRequired { nodeConfigDetails.Size = common.Int(int(*m.MachinePool.Spec.Replicas)) } @@ -643,7 +657,9 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke. return false, err } sourceDetails := oke.NodeSourceViaImageDetails{ - ImageId: spec.NodeSourceViaImage.ImageId, + // use image id from machinepool spec itself as the copy will not have the image set in the + // setNodepoolImageId method above + ImageId: m.OCIManagedMachinePool.Spec.NodeSourceViaImage.ImageId, BootVolumeSizeInGBs: spec.NodeSourceViaImage.BootVolumeSizeInGBs, } @@ -672,6 +688,19 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke. NodeConfigDetails: &nodeConfigDetails, NodeMetadata: spec.NodeMetadata, } + recycleConfig := spec.NodePoolCyclingDetails + // cannot send recycle config and placement config together + if recycleConfig != nil && len(nodeConfigDetails.PlacementConfigs) == 0 { + nodePoolDetails.NodePoolCyclingDetails = &oke.NodePoolCyclingDetails{ + IsNodeCyclingEnabled: recycleConfig.IsNodeCyclingEnabled, + MaximumSurge: recycleConfig.MaximumSurge, + MaximumUnavailable: recycleConfig.MaximumUnavailable, + } + } + if recycleConfig != nil && len(nodeConfigDetails.PlacementConfigs) != 0 { + m.Logger.V(LogLevelWarn).Info("Placement configuration has been changed in the update, " + + "hence node pool recycling configuration will not be sent with the update request") + } if spec.NodeEvictionNodePoolSettings != nil { nodePoolDetails.NodeEvictionNodePoolSettings = &oke.NodeEvictionNodePoolSettings{ EvictionGraceDuration: spec.NodeEvictionNodePoolSettings.EvictionGraceDuration, @@ -701,6 +730,7 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke. 
func setMachinePoolSpecDefaults(spec *infrav2exp.OCIManagedMachinePoolSpec) { spec.ProviderIDList = nil spec.ProviderID = nil + if spec.NodePoolNodeConfig != nil { if spec.NodePoolNodeConfig.PlacementConfigs != nil { configs := spec.NodePoolNodeConfig.PlacementConfigs @@ -782,6 +812,14 @@ func (m *ManagedMachinePoolScope) getSpecFromAPIObject(pool *oke.NodePool) *expi } spec.NodeShapeConfig = &nodeShapeConfig } + if pool.NodePoolCyclingDetails != nil { + cyclingDetails := pool.NodePoolCyclingDetails + spec.NodePoolCyclingDetails = &expinfra1.NodePoolCyclingDetails{ + IsNodeCyclingEnabled: cyclingDetails.IsNodeCyclingEnabled, + MaximumSurge: cyclingDetails.MaximumSurge, + MaximumUnavailable: cyclingDetails.MaximumUnavailable, + } + } return &spec } diff --git a/cloud/scope/managed_machine_pool_test.go b/cloud/scope/managed_machine_pool_test.go index a8028e19..70e8f782 100644 --- a/cloud/scope/managed_machine_pool_test.go +++ b/cloud/scope/managed_machine_pool_test.go @@ -1129,15 +1129,7 @@ func TestManagedMachinePoolUpdate(t *testing.T) { }, SshPublicKey: common.String("test-ssh-public-key"), NodeConfigDetails: &oke.UpdateNodePoolNodeConfigDetails{ - Size: common.Int(4), - PlacementConfigs: []oke.NodePoolPlacementConfigDetails{ - { - AvailabilityDomain: common.String("test-ad"), - CapacityReservationId: common.String("cap-id"), - SubnetId: common.String("subnet-id"), - FaultDomains: []string{"fd-1", "fd-2"}, - }, - }, + Size: common.Int(4), NsgIds: []string{"nsg-id"}, KmsKeyId: common.String("kms-key-id"), IsPvEncryptionInTransitEnabled: common.Bool(true), @@ -1376,14 +1368,6 @@ func TestManagedMachinePoolUpdate(t *testing.T) { }, SshPublicKey: common.String("test-ssh-public-key"), NodeConfigDetails: &oke.UpdateNodePoolNodeConfigDetails{ - PlacementConfigs: []oke.NodePoolPlacementConfigDetails{ - { - AvailabilityDomain: common.String("test-ad"), - CapacityReservationId: common.String("cap-id"), - SubnetId: common.String("subnet-id"), - FaultDomains: []string{"fd-1", "fd-2"}, - }, - }, NsgIds: []string{"nsg-id"}, KmsKeyId: common.String("kms-key-id"), IsPvEncryptionInTransitEnabled: common.Bool(true), @@ -1618,6 +1602,11 @@ func TestManagedMachinePoolUpdate(t *testing.T) { NodeSourceViaImage: &infrav2exp.NodeSourceViaImage{ ImageId: common.String("test-image-id"), }, + NodePoolCyclingDetails: &infrav2exp.NodePoolCyclingDetails{ + IsNodeCyclingEnabled: common.Bool(true), + MaximumSurge: common.String("20%"), + MaximumUnavailable: common.String("10%"), + }, SshPublicKey: "test-ssh-public-key", NodePoolNodeConfig: &infrav2exp.NodePoolNodeConfig{ PlacementConfigs: []infrav2exp.PlacementConfig{ @@ -1663,16 +1652,13 @@ func TestManagedMachinePoolUpdate(t *testing.T) { NodeSourceDetails: &oke.NodeSourceViaImageDetails{ ImageId: common.String("test-image-id"), }, + NodePoolCyclingDetails: &oke.NodePoolCyclingDetails{ + IsNodeCyclingEnabled: common.Bool(true), + MaximumSurge: common.String("20%"), + MaximumUnavailable: common.String("10%"), + }, SshPublicKey: common.String("test-ssh-public-key"), NodeConfigDetails: &oke.UpdateNodePoolNodeConfigDetails{ - PlacementConfigs: []oke.NodePoolPlacementConfigDetails{ - { - AvailabilityDomain: common.String("test-ad"), - CapacityReservationId: common.String("cap-id"), - SubnetId: common.String("subnet-id"), - FaultDomains: []string{"fd-1", "fd-2"}, - }, - }, NsgIds: []string{"nsg-id"}, KmsKeyId: common.String("kms-key-id"), IsPvEncryptionInTransitEnabled: common.Bool(true), diff --git a/cloud/scope/util.go b/cloud/scope/util.go index 53df2a72..01310e74 100644 
--- a/cloud/scope/util.go +++ b/cloud/scope/util.go @@ -20,6 +20,10 @@ import ( infrastructurev1beta2 "github.com/oracle/cluster-api-provider-oci/api/v1beta2" ) +const ( + LogLevelWarn = 3 +) + // GetNsgNamesFromId returns the names of the NSGs with the provided IDs func GetNsgNamesFromId(ids []string, nsgs []*infrastructurev1beta2.NSG) []string { names := make([]string, 0) diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepools.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepools.yaml index feb719a1..9c48e0cd 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepools.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepools.yaml @@ -339,6 +339,28 @@ spec: description: NodeMetadata defines a list of key/value pairs to add to each underlying OCI instance in the node pool on launch. type: object + nodePoolCyclingDetails: + description: NodePoolCyclingDetails defines the node pool recycling + options. + properties: + isNodeCyclingEnabled: + description: IsNodeCyclingEnabled refers if nodes in the nodepool + will be cycled to have new changes. + type: boolean + maximumSurge: + description: MaximumSurge refers to the maximum additional new + compute instances that would be temporarily created and added + to nodepool during the cycling nodepool process. OKE supports + both integer and percentage input. Defaults to 1, Ranges from + 0 to Nodepool size or 0% to 100% + type: string + maximumUnavailable: + description: Maximum active nodes that would be terminated from + nodepool during the cycling nodepool process. OKE supports both + integer and percentage input. Defaults to 0, Ranges from 0 to + Nodepool size or 0% to 100% + type: string + type: object nodePoolNodeConfig: description: NodePoolNodeConfig defines the configuration of nodes in the node pool. diff --git a/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepooltemplates.yaml b/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepooltemplates.yaml index 0448e4c1..13e8a7af 100644 --- a/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepooltemplates.yaml +++ b/config/crd/bases/infrastructure.cluster.x-k8s.io_ocimanagedmachinepooltemplates.yaml @@ -308,6 +308,28 @@ spec: to add to each underlying OCI instance in the node pool on launch. type: object + nodePoolCyclingDetails: + description: NodePoolCyclingDetails defines the node pool + recycling options. + properties: + isNodeCyclingEnabled: + description: IsNodeCyclingEnabled refers if nodes in the + nodepool will be cycled to have new changes. + type: boolean + maximumSurge: + description: MaximumSurge refers to the maximum additional + new compute instances that would be temporarily created + and added to nodepool during the cycling nodepool process. + OKE supports both integer and percentage input. Defaults + to 1, Ranges from 0 to Nodepool size or 0% to 100% + type: string + maximumUnavailable: + description: Maximum active nodes that would be terminated + from nodepool during the cycling nodepool process. OKE + supports both integer and percentage input. Defaults + to 0, Ranges from 0 to Nodepool size or 0% to 100% + type: string + type: object nodePoolNodeConfig: description: NodePoolNodeConfig defines the configuration of nodes in the node pool. 
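For context on the new CRD stanzas above: nodePoolCyclingDetails surfaces OKE's node cycling options on the OCIManagedMachinePool spec. A minimal illustrative spec might look like the following (the pool name, shape, version, and surge/unavailable values are placeholders, not taken from this change):

apiVersion: infrastructure.cluster.x-k8s.io/v1beta2
kind: OCIManagedMachinePool
metadata:
  name: example-mp
spec:
  version: "v1.26.2"
  nodeShape: "VM.Standard.E4.Flex"
  nodePoolCyclingDetails:
    # replace existing nodes in place when the node pool spec changes
    isNodeCyclingEnabled: true
    # create at most one extra node at a time while cycling (integer or percentage)
    maximumSurge: "1"
    # do not take an existing node down before its replacement is ready
    maximumUnavailable: "0"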
diff --git a/exp/api/v1beta1/conversion.go b/exp/api/v1beta1/conversion.go index 71b2e046..47d9b0ad 100644 --- a/exp/api/v1beta1/conversion.go +++ b/exp/api/v1beta1/conversion.go @@ -51,3 +51,7 @@ func Convert_v1beta2_NetworkDetails_To_v1beta1_NetworkDetails(in *infrastructure func Convert_v1beta2_OCIManagedControlPlaneSpec_To_v1beta1_OCIManagedControlPlaneSpec(in *v1beta2.OCIManagedControlPlaneSpec, out *OCIManagedControlPlaneSpec, s conversion.Scope) error { return autoConvert_v1beta2_OCIManagedControlPlaneSpec_To_v1beta1_OCIManagedControlPlaneSpec(in, out, s) } + +func Convert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(in *v1beta2.OCIManagedMachinePoolSpec, out *OCIManagedMachinePoolSpec, s conversion.Scope) error { + return autoConvert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(in, out, s) +} diff --git a/exp/api/v1beta1/ocimanagedmachinepool_conversion.go b/exp/api/v1beta1/ocimanagedmachinepool_conversion.go index 7a0946ed..98ba0a59 100644 --- a/exp/api/v1beta1/ocimanagedmachinepool_conversion.go +++ b/exp/api/v1beta1/ocimanagedmachinepool_conversion.go @@ -34,6 +34,7 @@ func (src *OCIManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error { if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { return err } + dst.Spec.NodePoolCyclingDetails = restored.Spec.NodePoolCyclingDetails return nil } diff --git a/exp/api/v1beta1/zz_generated.conversion.go b/exp/api/v1beta1/zz_generated.conversion.go index fbb2b822..7ad59bc1 100644 --- a/exp/api/v1beta1/zz_generated.conversion.go +++ b/exp/api/v1beta1/zz_generated.conversion.go @@ -431,11 +431,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta2.OCIManagedMachinePoolSpec)(nil), (*OCIManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(a.(*v1beta2.OCIManagedMachinePoolSpec), b.(*OCIManagedMachinePoolSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*OCIManagedMachinePoolStatus)(nil), (*v1beta2.OCIManagedMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_OCIManagedMachinePoolStatus_To_v1beta2_OCIManagedMachinePoolStatus(a.(*OCIManagedMachinePoolStatus), b.(*v1beta2.OCIManagedMachinePoolStatus), scope) }); err != nil { @@ -626,6 +621,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta2.OCIManagedMachinePoolSpec)(nil), (*OCIManagedMachinePoolSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(a.(*v1beta2.OCIManagedMachinePoolSpec), b.(*OCIManagedMachinePoolSpec), scope) + }); err != nil { + return err + } return nil } @@ -1746,7 +1746,17 @@ func Convert_v1beta2_OCIManagedMachinePool_To_v1beta1_OCIManagedMachinePool(in * func autoConvert_v1beta1_OCIManagedMachinePoolList_To_v1beta2_OCIManagedMachinePoolList(in *OCIManagedMachinePoolList, out *v1beta2.OCIManagedMachinePoolList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1beta2.OCIManagedMachinePool)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1beta2.OCIManagedMachinePool, len(*in)) + for i := range *in { + if err := 
Convert_v1beta1_OCIManagedMachinePool_To_v1beta2_OCIManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -1757,7 +1767,17 @@ func Convert_v1beta1_OCIManagedMachinePoolList_To_v1beta2_OCIManagedMachinePoolL func autoConvert_v1beta2_OCIManagedMachinePoolList_To_v1beta1_OCIManagedMachinePoolList(in *v1beta2.OCIManagedMachinePoolList, out *OCIManagedMachinePoolList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]OCIManagedMachinePool)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIManagedMachinePool, len(*in)) + for i := range *in { + if err := Convert_v1beta2_OCIManagedMachinePool_To_v1beta1_OCIManagedMachinePool(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -1799,15 +1819,11 @@ func autoConvert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachineP out.SshPublicKey = in.SshPublicKey out.NodeMetadata = *(*map[string]string)(unsafe.Pointer(&in.NodeMetadata)) out.InitialNodeLabels = *(*[]KeyValue)(unsafe.Pointer(&in.InitialNodeLabels)) + // WARNING: in.NodePoolCyclingDetails requires manual conversion: does not exist in peer-type out.ProviderIDList = *(*[]string)(unsafe.Pointer(&in.ProviderIDList)) return nil } -// Convert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec is an autogenerated conversion function. -func Convert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(in *v1beta2.OCIManagedMachinePoolSpec, out *OCIManagedMachinePoolSpec, s conversion.Scope) error { - return autoConvert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(in, out, s) -} - func autoConvert_v1beta1_OCIManagedMachinePoolStatus_To_v1beta2_OCIManagedMachinePoolStatus(in *OCIManagedMachinePoolStatus, out *v1beta2.OCIManagedMachinePoolStatus, s conversion.Scope) error { out.Ready = in.Ready out.Conditions = *(*clusterapiapiv1beta1.Conditions)(unsafe.Pointer(&in.Conditions)) @@ -1864,7 +1880,17 @@ func Convert_v1beta2_OCIManagedMachinePoolTemplate_To_v1beta1_OCIManagedMachineP func autoConvert_v1beta1_OCIManagedMachinePoolTemplateList_To_v1beta2_OCIManagedMachinePoolTemplateList(in *OCIManagedMachinePoolTemplateList, out *v1beta2.OCIManagedMachinePoolTemplateList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]v1beta2.OCIManagedMachinePoolTemplate)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1beta2.OCIManagedMachinePoolTemplate, len(*in)) + for i := range *in { + if err := Convert_v1beta1_OCIManagedMachinePoolTemplate_To_v1beta2_OCIManagedMachinePoolTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } @@ -1875,7 +1901,17 @@ func Convert_v1beta1_OCIManagedMachinePoolTemplateList_To_v1beta2_OCIManagedMach func autoConvert_v1beta2_OCIManagedMachinePoolTemplateList_To_v1beta1_OCIManagedMachinePoolTemplateList(in *v1beta2.OCIManagedMachinePoolTemplateList, out *OCIManagedMachinePoolTemplateList, s conversion.Scope) error { out.ListMeta = in.ListMeta - out.Items = *(*[]OCIManagedMachinePoolTemplate)(unsafe.Pointer(&in.Items)) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]OCIManagedMachinePoolTemplate, len(*in)) + for i := range *in { + if err := Convert_v1beta2_OCIManagedMachinePoolTemplate_To_v1beta1_OCIManagedMachinePoolTemplate(&(*in)[i], &(*out)[i], s); 
err != nil { + return err + } + } + } else { + out.Items = nil + } return nil } diff --git a/exp/api/v1beta2/ocimanagedmachinepool_types.go b/exp/api/v1beta2/ocimanagedmachinepool_types.go index 3db67049..5937da90 100644 --- a/exp/api/v1beta2/ocimanagedmachinepool_types.go +++ b/exp/api/v1beta2/ocimanagedmachinepool_types.go @@ -59,6 +59,10 @@ type OCIManagedMachinePoolSpec struct { // +optional InitialNodeLabels []KeyValue `json:"initialNodeLabels,omitempty"` + // NodePoolCyclingDetails defines the node pool recycling options. + // +optional + NodePoolCyclingDetails *NodePoolCyclingDetails `json:"nodePoolCyclingDetails,omitempty"` + // ProviderIDList are the identification IDs of machine instances provided by the provider. // This field must match the provider IDs as seen on the node objects corresponding to a machine pool's machine instances. // +optional @@ -191,6 +195,25 @@ type KeyValue struct { Value *string `json:"value,omitempty"` } +// NodePoolCyclingDetails defines the node pool recycling options +type NodePoolCyclingDetails struct { + + // IsNodeCyclingEnabled refers if nodes in the nodepool will be cycled to have new changes. + // +optional + IsNodeCyclingEnabled *bool `json:"isNodeCyclingEnabled,omitempty"` + + // MaximumSurge refers to the maximum additional new compute instances that would be temporarily created and + // added to nodepool during the cycling nodepool process. OKE supports both integer and percentage input. + // Defaults to 1, Ranges from 0 to Nodepool size or 0% to 100% + // +optional + MaximumSurge *string `json:"maximumSurge,omitempty"` + + // Maximum active nodes that would be terminated from nodepool during the cycling nodepool process. + // OKE supports both integer and percentage input. Defaults to 0, Ranges from 0 to Nodepool size or 0% to 100% + // +optional + MaximumUnavailable *string `json:"maximumUnavailable,omitempty"` +} + // OCIManagedMachinePoolStatus defines the observed state of OCIManagedMachinePool type OCIManagedMachinePoolStatus struct { // +optional diff --git a/exp/api/v1beta2/ocimanagedmachinepool_webhook.go b/exp/api/v1beta2/ocimanagedmachinepool_webhook.go index e3fce0e1..716e9bd1 100644 --- a/exp/api/v1beta2/ocimanagedmachinepool_webhook.go +++ b/exp/api/v1beta2/ocimanagedmachinepool_webhook.go @@ -17,6 +17,9 @@ limitations under the License. package v1beta2 import ( + "fmt" + "reflect" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" @@ -86,7 +89,28 @@ func (m *OCIManagedMachinePool) validateVersion(allErrs field.ErrorList) field.E func (m *OCIManagedMachinePool) ValidateUpdate(old runtime.Object) error { var allErrs field.ErrorList + oldManagedMachinePool, ok := old.(*OCIManagedMachinePool) + if !ok { + return apierrors.NewBadRequest(fmt.Sprintf("expected an OCIManagedMachinePool but got a %T", old)) + } + allErrs = m.validateVersion(allErrs) + if !reflect.DeepEqual(m.Spec.Version, oldManagedMachinePool.Spec.Version) { + newImage := m.getImageId() + oldImage := oldManagedMachinePool.getImageId() + // if an image has been provided in updated machine pool and it matches old image id, + // and if Kubernetes version has been updated, that means the image is not correct. 
If the version has + // been changed, the image should have been updated by the user, or set as nil in which case + // CAPOCI will lookup a correct image + if newImage != nil && reflect.DeepEqual(newImage, oldImage) { + allErrs = append( + allErrs, + field.Invalid(field.NewPath("spec", "nodeSourceViaImage", "imageId"), + m.getImageId(), "image id has not been updated for the newer version, "+ + "either provide a newer image or set the field as nil")) + + } + } if len(allErrs) == 0 { return nil } @@ -96,3 +120,10 @@ func (m *OCIManagedMachinePool) ValidateUpdate(old runtime.Object) error { func (m *OCIManagedMachinePool) ValidateDelete() error { return nil } + +func (m *OCIManagedMachinePool) getImageId() *string { + if m.Spec.NodeSourceViaImage != nil { + return m.Spec.NodeSourceViaImage.ImageId + } + return nil +} diff --git a/exp/api/v1beta2/ocimanagedmachinepool_webhook_test.go b/exp/api/v1beta2/ocimanagedmachinepool_webhook_test.go index 55c76a06..b7797e16 100644 --- a/exp/api/v1beta2/ocimanagedmachinepool_webhook_test.go +++ b/exp/api/v1beta2/ocimanagedmachinepool_webhook_test.go @@ -146,10 +146,12 @@ func TestOCIManagedMachinePool_ValidateCreate(t *testing.T) { func TestOCIManagedMachinePool_ValidateUpdate(t *testing.T) { validVersion := common.String("v1.25.1") + oldVersion := common.String("v1.24.1") inValidVersion := common.String("abcd") tests := []struct { name string m *OCIManagedMachinePool + old *OCIManagedMachinePool errorMgsShouldContain string expectErr bool }{ @@ -163,6 +165,14 @@ func TestOCIManagedMachinePool_ValidateUpdate(t *testing.T) { Version: validVersion, }, }, + old: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: validVersion, + }, + }, expectErr: false, }, { @@ -172,6 +182,11 @@ func TestOCIManagedMachinePool_ValidateUpdate(t *testing.T) { Name: "abcdefghijklmno", }, }, + old: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + }, expectErr: true, }, { @@ -184,19 +199,95 @@ func TestOCIManagedMachinePool_ValidateUpdate(t *testing.T) { Version: inValidVersion, }, }, + old: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + }, + expectErr: true, + }, + { + name: "should allow version update with different images", + m: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: validVersion, + NodeSourceViaImage: &NodeSourceViaImage{ + ImageId: common.String("new"), + }, + }, + }, + old: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: oldVersion, + NodeSourceViaImage: &NodeSourceViaImage{ + ImageId: common.String("old"), + }, + }, + }, + expectErr: false, + }, + { + name: "should allow version update with both nil", + m: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: validVersion, + }, + }, + old: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: oldVersion, + }, + }, + expectErr: false, + }, + { + name: "should not allow version update with same image", + m: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: validVersion, + NodeSourceViaImage: &NodeSourceViaImage{ + ImageId: 
common.String("old"), + }, + }, + }, + old: &OCIManagedMachinePool{ + ObjectMeta: metav1.ObjectMeta{ + Name: "abcdefghijklmno", + }, + Spec: OCIManagedMachinePoolSpec{ + Version: oldVersion, + NodeSourceViaImage: &NodeSourceViaImage{ + ImageId: common.String("old"), + }, + }, + }, expectErr: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { g := gomega.NewWithT(t) - + err := test.m.ValidateUpdate(test.old) if test.expectErr { - err := test.m.ValidateUpdate(nil) g.Expect(err).NotTo(gomega.Succeed()) g.Expect(strings.Contains(err.Error(), test.errorMgsShouldContain)).To(gomega.BeTrue()) } else { - g.Expect(test.m.ValidateCreate()).To(gomega.Succeed()) + g.Expect(err).To(gomega.Succeed()) } }) } diff --git a/exp/api/v1beta2/zz_generated.deepcopy.go b/exp/api/v1beta2/zz_generated.deepcopy.go index 745bba90..1cf9577c 100644 --- a/exp/api/v1beta2/zz_generated.deepcopy.go +++ b/exp/api/v1beta2/zz_generated.deepcopy.go @@ -435,6 +435,36 @@ func (in *NodeEvictionNodePoolSettings) DeepCopy() *NodeEvictionNodePoolSettings return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodePoolCyclingDetails) DeepCopyInto(out *NodePoolCyclingDetails) { + *out = *in + if in.IsNodeCyclingEnabled != nil { + in, out := &in.IsNodeCyclingEnabled, &out.IsNodeCyclingEnabled + *out = new(bool) + **out = **in + } + if in.MaximumSurge != nil { + in, out := &in.MaximumSurge, &out.MaximumSurge + *out = new(string) + **out = **in + } + if in.MaximumUnavailable != nil { + in, out := &in.MaximumUnavailable, &out.MaximumUnavailable + *out = new(string) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodePoolCyclingDetails. +func (in *NodePoolCyclingDetails) DeepCopy() *NodePoolCyclingDetails { + if in == nil { + return nil + } + out := new(NodePoolCyclingDetails) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NodePoolNodeConfig) DeepCopyInto(out *NodePoolNodeConfig) { *out = *in @@ -1235,6 +1265,11 @@ func (in *OCIManagedMachinePoolSpec) DeepCopyInto(out *OCIManagedMachinePoolSpec (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.NodePoolCyclingDetails != nil { + in, out := &in.NodePoolCyclingDetails, &out.NodePoolCyclingDetails + *out = new(NodePoolCyclingDetails) + (*in).DeepCopyInto(*out) + } if in.ProviderIDList != nil { in, out := &in.ProviderIDList, &out.ProviderIDList *out = make([]string, len(*in)) diff --git a/exp/controllers/ocimanagedcluster_controlplane_controller_test.go b/exp/controllers/ocimanagedcluster_controlplane_controller_test.go index bbd6c3f5..17a0c08b 100644 --- a/exp/controllers/ocimanagedcluster_controlplane_controller_test.go +++ b/exp/controllers/ocimanagedcluster_controlplane_controller_test.go @@ -284,6 +284,7 @@ func TestControlPlaneReconciliationFunction(t *testing.T) { Cluster: oke.Cluster{ Id: common.String("test"), Name: common.String("test"), + Type: oke.ClusterTypeBasicCluster, CompartmentId: common.String("test-compartment"), VcnId: common.String("vcn-id"), KubernetesVersion: common.String("v1.24.5"), diff --git a/go.mod b/go.mod index 63190fbc..2aced89f 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/google/gofuzz v1.2.0 github.com/onsi/ginkgo/v2 v2.9.2 github.com/onsi/gomega v1.27.5 - github.com/oracle/oci-go-sdk/v65 v65.33.1 + github.com/oracle/oci-go-sdk/v65 v65.40.1 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.15.1 github.com/spf13/pflag v1.0.5 diff --git a/go.sum b/go.sum index e51c5c44..4affb2be 100644 --- a/go.sum +++ b/go.sum @@ -369,8 +369,8 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/oracle/oci-go-sdk/v65 v65.33.1 h1:+AmUc1J1eY39Ys8hwXExK5ju7oieDrcC4KkXaQDMztg= -github.com/oracle/oci-go-sdk/v65 v65.33.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQmCOmbX5kjVEJodw= +github.com/oracle/oci-go-sdk/v65 v65.40.1 h1:nukjC4GfrpOxOEoGvqg8y31/11VtaeSnejF7icyMKJg= +github.com/oracle/oci-go-sdk/v65 v65.40.1/go.mod h1:MXMLMzHnnd9wlpgadPkdlkZ9YrwQmCOmbX5kjVEJodw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= diff --git a/test/e2e/config/e2e_conf.yaml b/test/e2e/config/e2e_conf.yaml index 00aa99b0..1d377bc9 100644 --- a/test/e2e/config/e2e_conf.yaml +++ b/test/e2e/config/e2e_conf.yaml @@ -71,6 +71,7 @@ providers: - sourcePath: "../data/infrastructure-oci/v1beta2/cluster-template-externally-managed-vcn.yaml" - sourcePath: "../data/infrastructure-oci/v1beta2/cluster-template-machine-pool.yaml" - sourcePath: "../data/infrastructure-oci/v1beta2/cluster-template-managed.yaml" + - sourcePath: "../data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling.yaml" - sourcePath: "../data/infrastructure-oci/v1beta2/cluster-template-managed-virtual.yaml" - sourcePath: "../data/infrastructure-oci/v1beta2/cluster-template-managed-cluster-identity.yaml" - sourcePath: 
"../data/infrastructure-oci/v1beta2/cluster-template-cluster-identity.yaml" diff --git a/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/cluster.yaml b/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/cluster.yaml new file mode 100644 index 00000000..35edd427 --- /dev/null +++ b/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/cluster.yaml @@ -0,0 +1,37 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: Cluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: OCIManagedCluster + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" + controlPlaneRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: OCIManagedControlPlane + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: OCIManagedCluster +metadata: + labels: + cluster.x-k8s.io/cluster-name: "${CLUSTER_NAME}" + name: "${CLUSTER_NAME}" +spec: + compartmentId: "${OCI_COMPARTMENT_ID}" +--- +kind: OCIManagedControlPlane +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +metadata: + name: "${CLUSTER_NAME}" + namespace: "${NAMESPACE}" +spec: + version: "${OCI_MANAGED_KUBERNETES_VERSION_UPGRADE}" + clusterType: "ENHANCED_CLUSTER" +--- \ No newline at end of file diff --git a/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/kustomization.yaml b/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/kustomization.yaml new file mode 100644 index 00000000..5cf6f22c --- /dev/null +++ b/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/kustomization.yaml @@ -0,0 +1,3 @@ +bases: + - ./cluster.yaml + - ./machine-pool.yaml \ No newline at end of file diff --git a/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/machine-pool.yaml b/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/machine-pool.yaml new file mode 100644 index 00000000..53d04d15 --- /dev/null +++ b/test/e2e/data/infrastructure-oci/v1beta2/cluster-template-managed-node-recycling/machine-pool.yaml @@ -0,0 +1,38 @@ +--- +# testing nodepool without image id set +apiVersion: cluster.x-k8s.io/v1beta1 +kind: MachinePool +metadata: + name: ${CLUSTER_NAME}-mp-1 + namespace: default +spec: + clusterName: ${CLUSTER_NAME} + replicas: ${NODE_MACHINE_COUNT} + template: + spec: + clusterName: ${CLUSTER_NAME} + bootstrap: + dataSecretName: "" + infrastructureRef: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 + kind: OCIManagedMachinePool + name: ${CLUSTER_NAME}-mp-1 + version: ${KUBERNETES_VERSION} +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta2 +kind: OCIManagedMachinePool +metadata: + name: ${CLUSTER_NAME}-mp-1 + namespace: default +spec: + version: "${OCI_MANAGED_KUBERNETES_VERSION}" + nodeShape: "${OCI_MANAGED_NODE_SHAPE}" + sshPublicKey: "${OCI_SSH_KEY}" + nodeSourceViaImage: + bootVolumeSizeInGBs: 50 + nodeShapeConfig: + memoryInGBs: "16" + ocpus: "1" + nodePoolCyclingDetails: + isNodeCyclingEnabled: true +--- \ No newline at end of file diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index d33a5c30..7cd40670 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -33,12 +33,14 @@ import ( . "github.com/onsi/ginkgo/v2" . 
"github.com/onsi/gomega" infrastructurev1beta1 "github.com/oracle/cluster-api-provider-oci/api/v1beta1" + infrastructurev1beta2 "github.com/oracle/cluster-api-provider-oci/api/v1beta2" oci_config "github.com/oracle/cluster-api-provider-oci/cloud/config" "github.com/oracle/cluster-api-provider-oci/cloud/scope" "github.com/oracle/cluster-api-provider-oci/cloud/services/compute" nlb "github.com/oracle/cluster-api-provider-oci/cloud/services/networkloadbalancer" "github.com/oracle/cluster-api-provider-oci/cloud/services/vcn" infrav1exp "github.com/oracle/cluster-api-provider-oci/exp/api/v1beta1" + infrav2exp "github.com/oracle/cluster-api-provider-oci/exp/api/v1beta2" "github.com/oracle/oci-go-sdk/v65/common" "github.com/oracle/oci-go-sdk/v65/identity" "k8s.io/apimachinery/pkg/runtime" @@ -264,7 +266,9 @@ func initScheme() *runtime.Scheme { scheme := runtime.NewScheme() framework.TryAddDefaultSchemes(scheme) Expect(infrastructurev1beta1.AddToScheme(scheme)).To(Succeed()) + Expect(infrastructurev1beta2.AddToScheme(scheme)).To(Succeed()) Expect(infrav1exp.AddToScheme(scheme)).To(Succeed()) + Expect(infrav2exp.AddToScheme(scheme)).To(Succeed()) Expect(clusterv1.AddToScheme(scheme)).To(Succeed()) Expect(clusterv1exp.AddToScheme(scheme)).To(Succeed()) return scheme diff --git a/test/e2e/managed_cluster_test.go b/test/e2e/managed_cluster_test.go index c3f85496..56494532 100644 --- a/test/e2e/managed_cluster_test.go +++ b/test/e2e/managed_cluster_test.go @@ -25,22 +25,27 @@ import ( "os" "path/filepath" "reflect" + "strings" "time" . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" infrav1exp "github.com/oracle/cluster-api-provider-oci/exp/api/v1beta1" + infrav2exp "github.com/oracle/cluster-api-provider-oci/exp/api/v1beta2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog/v2" "k8s.io/utils/pointer" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/clusterctl" "sigs.k8s.io/cluster-api/util" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/kind/pkg/errors" ) const ( @@ -217,6 +222,51 @@ var _ = Describe("Managed Workload cluster creation", func() { clusterctl.ApplyClusterTemplateAndWait(ctx, input, result) }) + It("Managed Cluster - Node Recycling", func() { + clusterName = getClusterName(clusterNamePrefix, "cls-iden") + input := clusterctl.ApplyClusterTemplateAndWaitInput{ + ClusterProxy: bootstrapClusterProxy, + ConfigCluster: clusterctl.ConfigClusterInput{ + LogFolder: filepath.Join(artifactFolder, "clusters", bootstrapClusterProxy.GetName()), + ClusterctlConfigPath: clusterctlConfigPath, + KubeconfigPath: bootstrapClusterProxy.GetKubeconfigPath(), + InfrastructureProvider: clusterctl.DefaultInfrastructureProvider, + Flavor: "managed-node-recycling", + Namespace: namespace.Name, + ClusterName: clusterName, + ControlPlaneMachineCount: pointer.Int64(1), + WorkerMachineCount: pointer.Int64(1), + KubernetesVersion: e2eConfig.GetVariable(capi_e2e.KubernetesVersion), + }, + WaitForClusterIntervals: e2eConfig.GetIntervals(specName, "wait-cluster"), + WaitForControlPlaneIntervals: e2eConfig.GetIntervals(specName, "wait-control-plane"), + WaitForMachinePools: e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes"), + WaitForMachineDeployments: e2eConfig.GetIntervals(specName, "wait-worker-nodes"), + } + 
input.WaitForControlPlaneInitialized = func(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) { + Expect(ctx).NotTo(BeNil(), "ctx is required for DiscoveryAndWaitForControlPlaneInitialized") + lister := input.ClusterProxy.GetClient() + Expect(lister).ToNot(BeNil(), "Invalid argument. input.Lister can't be nil when calling DiscoveryAndWaitForControlPlaneInitialized") + var controlPlane *infrav1exp.OCIManagedControlPlane + Eventually(func(g Gomega) { + controlPlane = GetOCIManagedControlPlaneByCluster(ctx, lister, result.Cluster.Name, result.Cluster.Namespace) + if controlPlane != nil { + Log(fmt.Sprintf("Control plane is not nil, status is %t", controlPlane.Status.Ready)) + } + g.Expect(controlPlane).ToNot(BeNil()) + g.Expect(controlPlane.Status.Ready).To(BeTrue()) + }, input.WaitForControlPlaneIntervals...).Should(Succeed(), "Couldn't get the control plane ready status for the cluster %s", klog.KObj(result.Cluster)) + } + input.WaitForControlPlaneMachinesReady = func(ctx context.Context, input clusterctl.ApplyClusterTemplateAndWaitInput, result *clusterctl.ApplyClusterTemplateAndWaitResult) { + // Not applicable + } + + clusterctl.ApplyClusterTemplateAndWait(ctx, input, result) + + updateMachinePoolVersion(ctx, result.Cluster, bootstrapClusterProxy, result.MachinePools, + e2eConfig.GetIntervals(specName, "wait-machine-pool-nodes")) + }) + It("Managed Cluster - Virtual Node Pool [PRBlocking]", func() { clusterName = getClusterName(clusterNamePrefix, "virtual") input := clusterctl.ApplyClusterTemplateAndWaitInput{ @@ -308,3 +358,92 @@ func upgradeControlPlaneVersionSpec(ctx context.Context, lister client.Client, c }, WaitForControlPlaneIntervals...).Should(BeTrue()) Log("Upgrade test has completed") } + +func updateMachinePoolVersion(ctx context.Context, cluster *clusterv1.Cluster, clusterProxy framework.ClusterProxy, machinePools []*expv1.MachinePool, waitInterval []interface{}) { + var machinePool *expv1.MachinePool + for _, pool := range machinePools { + if strings.HasSuffix(pool.Name, "-1") { + machinePool = pool + break + } + } + lister := clusterProxy.GetClient() + Expect(machinePool).NotTo(BeNil()) + managedKubernetesUpgradeVersion := e2eConfig.GetVariable(ManagedKubernetesUpgradeVersion) + + patchHelper, err := patch.NewHelper(machinePool, lister) + Expect(err).ToNot(HaveOccurred()) + Expect(e2eConfig.Variables).To(HaveKey(ManagedKubernetesUpgradeVersion), "Missing %s variable in the config", ManagedKubernetesUpgradeVersion) + Log(fmt.Sprintf("Upgrade test is starting, upgrade version is %s", managedKubernetesUpgradeVersion)) + machinePool.Spec.Template.Spec.Version = &managedKubernetesUpgradeVersion + Expect(patchHelper.Patch(ctx, machinePool)).To(Succeed()) + + ociMachinePool := &infrav2exp.OCIManagedMachinePool{} + err = lister.Get(ctx, client.ObjectKey{Name: machinePool.Name, Namespace: cluster.Namespace}, ociMachinePool) + Expect(err).To(BeNil()) + patchHelper, err = patch.NewHelper(ociMachinePool, lister) + // to update a node pool, set the version and set the current image to nil so that CAPOCI will + // automatically lookup a new version + ociMachinePool.Spec.Version = &managedKubernetesUpgradeVersion + ociMachinePool.Spec.NodeSourceViaImage.ImageId = nil + Expect(err).ToNot(HaveOccurred()) + Expect(patchHelper.Patch(ctx, ociMachinePool)).To(Succeed()) + + Log("Upgrade test is starting") + + Eventually(func() (int, error) { + mpKey := client.ObjectKey{ + Namespace: machinePool.Namespace, + Name: 
machinePool.Name, + } + if err := lister.Get(ctx, mpKey, machinePool); err != nil { + return 0, err + } + versions := getMachinePoolInstanceVersions(ctx, clusterProxy, cluster, machinePool) + matches := 0 + for _, version := range versions { + if version == managedKubernetesUpgradeVersion { + matches++ + } + } + + if matches != len(versions) { + return 0, errors.Errorf("old version instances remain. Expected %d instances at version %v. Got version list: %v", len(versions), managedKubernetesUpgradeVersion, versions) + } + + return matches, nil + }, waitInterval...).Should(Equal(1), "Timed out waiting for all MachinePool %s instances to be upgraded to Kubernetes version %s", klog.KObj(machinePool), managedKubernetesUpgradeVersion) +} + +// getMachinePoolInstanceVersions returns the Kubernetes versions of the machine pool instances. +// This method was forked because we need to lookup the kubeconfig with each call +// as the tokens are refreshed in case of OKE +func getMachinePoolInstanceVersions(ctx context.Context, clusterProxy framework.ClusterProxy, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool) []string { + Expect(ctx).NotTo(BeNil(), "ctx is required for getMachinePoolInstanceVersions") + + instances := machinePool.Status.NodeRefs + versions := make([]string, len(instances)) + for i, instance := range instances { + node := &corev1.Node{} + var nodeGetError error + err := wait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (bool, error) { + nodeGetError = clusterProxy.GetWorkloadCluster(ctx, cluster.Namespace, cluster.Name). + GetClient().Get(ctx, client.ObjectKey{Name: instance.Name}, node) + if nodeGetError != nil { + return false, nil //nolint:nilerr + } + return true, nil + }) + if err != nil { + versions[i] = "unknown" + if nodeGetError != nil { + // Dump the instance name and error here so that we can log it as part of the version array later on. + versions[i] = fmt.Sprintf("%s error: %s", instance.Name, errors.Wrap(err, nodeGetError.Error())) + } + } else { + versions[i] = node.Status.NodeInfo.KubeletVersion + } + } + + return versions +}
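Taken together, the webhook change and the managed-node-recycling e2e test above encode the expected upgrade flow: bump spec.version on the OCIManagedMachinePool and either supply an image built for the new Kubernetes version or clear nodeSourceViaImage.imageId so CAPOCI looks one up; the webhook rejects a version bump that keeps the old imageId. A sketch of such a change as a JSON merge patch (resource name and version are placeholders, not from this change):

# upgrade-pool-patch.yaml (hypothetical): bump the version and drop the stale image id
spec:
  version: "v1.26.2"
  nodeSourceViaImage:
    imageId: null   # null removes the field in a merge patch, so CAPOCI looks up a matching image

Applied with something like `kubectl patch ocimanagedmachinepool example-mp-1 --type merge --patch-file upgrade-pool-patch.yaml`; as in updateMachinePoolVersion above, the owning MachinePool's spec.template.spec.version is bumped alongside it. With isNodeCyclingEnabled set, OKE then recycles the existing nodes onto the new version within the configured maximumSurge/maximumUnavailable limits.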