Add node pool recycling #285

Merged

27 commits
6cf0c3d  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
62a1084  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
d2cf958  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
8578987  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
2cab41b  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
56f82aa  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
d27908b  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
d2a6f04  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
b03bf56  Add node pool recycling options (shyamradhakrishnan, Jun 12, 2023)
c476bd1  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
2e310c2  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
43ae662  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
668f83d  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
1427495  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
bac38de  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
f6025bb  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
89553d6  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
6dded41  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
62a4314  Add node pool recycling options (shyamradhakrishnan, Jun 13, 2023)
da97f3f  Add node pool recycling options (shyamradhakrishnan, Jun 14, 2023)
f5262fe  Add node pool recycling options (shyamradhakrishnan, Jun 14, 2023)
d8d9ed3  Add node pool recycling options (shyamradhakrishnan, Jun 14, 2023)
f9ac1bc  Add node pool recycling options (shyamradhakrishnan, Jun 14, 2023)
a59fcb2  Add node pool recycling options (shyamradhakrishnan, Jun 19, 2023)
18cfee8  Add node pool recycling options (shyamradhakrishnan, Jun 21, 2023)
ed6d14c  Add node pool recycling options (shyamradhakrishnan, Jun 21, 2023)
c5ab55b  Add node pool recycling options (shyamradhakrishnan, Jun 22, 2023)
1 change: 1 addition & 0 deletions Makefile
@@ -289,6 +289,7 @@ generate-e2e-templates: $(KUSTOMIZE)
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-externally-managed-vcn --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-externally-managed-vcn.yaml
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-machine-pool --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-machine-pool.yaml
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-managed --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-managed.yaml
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-node-recycling --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-node-recycling.yaml
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-cluster-identity --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-managed-cluster-identity.yaml
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-cluster-identity --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-cluster-identity.yaml
$(KUSTOMIZE) build $(OCI_TEMPLATES)/v1beta2/cluster-template-windows-calico --load-restrictor LoadRestrictionsNone > $(OCI_TEMPLATES)/v1beta2/cluster-template-windows-calico.yaml
4 changes: 4 additions & 0 deletions cloud/scope/managed_control_plane.go
@@ -588,6 +588,9 @@ func (s *ManagedControlPlaneScope) UpdateControlPlane(ctx context.Context, okeCl
// there is a chance user will edit the cluster
func setControlPlaneSpecDefaults(spec *infrav2exp.OCIManagedControlPlaneSpec) {
spec.ControlPlaneEndpoint = clusterv1.APIEndpoint{}
if spec.ClusterType == "" {
spec.ClusterType = infrav2exp.BasicClusterType
}
if spec.ImagePolicyConfig == nil {
spec.ImagePolicyConfig = &infrav2exp.ImagePolicyConfig{
IsPolicyEnabled: common.Bool(false),
@@ -663,6 +666,7 @@ func (s *ManagedControlPlaneScope) getSpecFromActual(cluster *oke.Cluster) *infr
spec.ClusterType = infrav2exp.EnhancedClusterType
break
default:
spec.ClusterType = infrav2exp.BasicClusterType
break
}
}
1 change: 1 addition & 0 deletions cloud/scope/managed_control_plane_test.go
@@ -473,6 +473,7 @@ func TestControlPlaneUpdation(t *testing.T) {
CompartmentId: common.String("test-compartment"),
VcnId: common.String("vcn-id"),
KubernetesVersion: common.String("v1.24.5"),
Type: oke.ClusterTypeBasicCluster,
FreeformTags: tags,
DefinedTags: definedTagsInterface,
EndpointConfig: &oke.ClusterEndpointConfig{
50 changes: 44 additions & 6 deletions cloud/scope/managed_machine_pool.go
@@ -313,6 +313,14 @@ func (m *ManagedMachinePoolScope) CreateNodePool(ctx context.Context) (*oke.Node
IsForceDeleteAfterGraceDuration: m.OCIManagedMachinePool.Spec.NodeEvictionNodePoolSettings.IsForceDeleteAfterGraceDuration,
}
}
recycleConfig := m.OCIManagedMachinePool.Spec.NodePoolCyclingDetails
if recycleConfig != nil {
nodePoolDetails.NodePoolCyclingDetails = &oke.NodePoolCyclingDetails{
IsNodeCyclingEnabled: recycleConfig.IsNodeCyclingEnabled,
MaximumSurge: recycleConfig.MaximumSurge,
MaximumUnavailable: recycleConfig.MaximumUnavailable,
}
}
nodePoolDetails.InitialNodeLabels = m.getInitialNodeKeyValuePairs()

req := oke.CreateNodePoolRequest{
@@ -603,16 +611,22 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke.
return false, err
}
m.Logger.Info("Node pool", "spec", jsonSpec, "actual", jsonActual)
placementConfig, err := m.buildPlacementConfig(spec.NodePoolNodeConfig.PlacementConfigs)
if err != nil {
return false, err
}

nodeConfigDetails := oke.UpdateNodePoolNodeConfigDetails{
NsgIds: m.getWorkerMachineNSGs(),
PlacementConfigs: placementConfig,
IsPvEncryptionInTransitEnabled: spec.NodePoolNodeConfig.IsPvEncryptionInTransitEnabled,
KmsKeyId: spec.NodePoolNodeConfig.KmsKeyId,
}
// send placement config only if there is an actual change in placement
// placement config and recycle config cannot be sent at the same time, and most use cases will
// be to update kubernetes version in which case, placement config is not required to be sent
if !reflect.DeepEqual(spec.NodePoolNodeConfig.PlacementConfigs, actual.NodePoolNodeConfig.PlacementConfigs) {
placementConfig, err := m.buildPlacementConfig(spec.NodePoolNodeConfig.PlacementConfigs)
if err != nil {
return false, err
}
nodeConfigDetails.PlacementConfigs = placementConfig
}
if nodePoolSizeUpdateRequired {
nodeConfigDetails.Size = common.Int(int(*m.MachinePool.Spec.Replicas))
}
@@ -643,7 +657,9 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke.
return false, err
}
sourceDetails := oke.NodeSourceViaImageDetails{
ImageId: spec.NodeSourceViaImage.ImageId,
// use image id from machinepool spec itself as the copy will not have the image set in the
// setNodepoolImageId method above
ImageId: m.OCIManagedMachinePool.Spec.NodeSourceViaImage.ImageId,
BootVolumeSizeInGBs: spec.NodeSourceViaImage.BootVolumeSizeInGBs,
}

@@ -672,6 +688,19 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke.
NodeConfigDetails: &nodeConfigDetails,
NodeMetadata: spec.NodeMetadata,
}
recycleConfig := spec.NodePoolCyclingDetails
// cannot send recycle config and placement config together
if recycleConfig != nil && len(nodeConfigDetails.PlacementConfigs) == 0 {
nodePoolDetails.NodePoolCyclingDetails = &oke.NodePoolCyclingDetails{
IsNodeCyclingEnabled: recycleConfig.IsNodeCyclingEnabled,
MaximumSurge: recycleConfig.MaximumSurge,
MaximumUnavailable: recycleConfig.MaximumUnavailable,
}
}
if recycleConfig != nil && len(nodeConfigDetails.PlacementConfigs) != 0 {
m.Logger.V(LogLevelWarn).Info("Placement configuration has been changed in the update, " +
"hence node pool recycling configuration will not be sent with the update request")
}
if spec.NodeEvictionNodePoolSettings != nil {
nodePoolDetails.NodeEvictionNodePoolSettings = &oke.NodeEvictionNodePoolSettings{
EvictionGraceDuration: spec.NodeEvictionNodePoolSettings.EvictionGraceDuration,
@@ -701,6 +730,7 @@ func (m *ManagedMachinePoolScope) UpdateNodePool(ctx context.Context, pool *oke.
func setMachinePoolSpecDefaults(spec *infrav2exp.OCIManagedMachinePoolSpec) {
spec.ProviderIDList = nil
spec.ProviderID = nil

if spec.NodePoolNodeConfig != nil {
if spec.NodePoolNodeConfig.PlacementConfigs != nil {
configs := spec.NodePoolNodeConfig.PlacementConfigs
@@ -782,6 +812,14 @@ func (m *ManagedMachinePoolScope) getSpecFromAPIObject(pool *oke.NodePool) *expi
}
spec.NodeShapeConfig = &nodeShapeConfig
}
if pool.NodePoolCyclingDetails != nil {
cyclingDetails := pool.NodePoolCyclingDetails
spec.NodePoolCyclingDetails = &expinfra1.NodePoolCyclingDetails{
IsNodeCyclingEnabled: cyclingDetails.IsNodeCyclingEnabled,
MaximumSurge: cyclingDetails.MaximumSurge,
MaximumUnavailable: cyclingDetails.MaximumUnavailable,
}
}
return &spec
}

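Note on the update path above: the OKE API does not accept a placement change and node cycling details in the same update request, so the cycling details are attached only when no placement configuration is being sent, and a warning is logged otherwise. A minimal sketch of that decision, assuming the OCI SDK package github.com/oracle/oci-go-sdk/v65/containerengine and an exp API path of github.com/oracle/cluster-api-provider-oci/exp/api/v1beta2; the helper name is illustrative and not part of this PR:

```go
package scope

import (
	// Assumed import paths; the real provider code already imports these
	// packages under the same aliases.
	infrav2exp "github.com/oracle/cluster-api-provider-oci/exp/api/v1beta2"
	oke "github.com/oracle/oci-go-sdk/v65/containerengine"
)

// attachCyclingDetails copies the recycling options from the machine pool spec
// onto the OKE update request, but only when the request carries no placement
// change, mirroring the mutual exclusion enforced in UpdateNodePool above.
func attachCyclingDetails(
	details *oke.UpdateNodePoolDetails,
	nodeConfig oke.UpdateNodePoolNodeConfigDetails,
	recycle *infrav2exp.NodePoolCyclingDetails,
) {
	if recycle == nil {
		return
	}
	if len(nodeConfig.PlacementConfigs) != 0 {
		// Placement is changing in this request; the cycling details are
		// dropped and the real implementation only logs a warning.
		return
	}
	details.NodePoolCyclingDetails = &oke.NodePoolCyclingDetails{
		IsNodeCyclingEnabled: recycle.IsNodeCyclingEnabled,
		MaximumSurge:         recycle.MaximumSurge,
		MaximumUnavailable:   recycle.MaximumUnavailable,
	}
}
```

Since most updates change only the Kubernetes version or image, the common case sends the cycling details and omits placement entirely.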
36 changes: 11 additions & 25 deletions cloud/scope/managed_machine_pool_test.go
@@ -1129,15 +1129,7 @@ func TestManagedMachinePoolUpdate(t *testing.T) {
},
SshPublicKey: common.String("test-ssh-public-key"),
NodeConfigDetails: &oke.UpdateNodePoolNodeConfigDetails{
Size: common.Int(4),
PlacementConfigs: []oke.NodePoolPlacementConfigDetails{
{
AvailabilityDomain: common.String("test-ad"),
CapacityReservationId: common.String("cap-id"),
SubnetId: common.String("subnet-id"),
FaultDomains: []string{"fd-1", "fd-2"},
},
},
Size: common.Int(4),
NsgIds: []string{"nsg-id"},
KmsKeyId: common.String("kms-key-id"),
IsPvEncryptionInTransitEnabled: common.Bool(true),
@@ -1376,14 +1368,6 @@ func TestManagedMachinePoolUpdate(t *testing.T) {
},
SshPublicKey: common.String("test-ssh-public-key"),
NodeConfigDetails: &oke.UpdateNodePoolNodeConfigDetails{
PlacementConfigs: []oke.NodePoolPlacementConfigDetails{
{
AvailabilityDomain: common.String("test-ad"),
CapacityReservationId: common.String("cap-id"),
SubnetId: common.String("subnet-id"),
FaultDomains: []string{"fd-1", "fd-2"},
},
},
NsgIds: []string{"nsg-id"},
KmsKeyId: common.String("kms-key-id"),
IsPvEncryptionInTransitEnabled: common.Bool(true),
@@ -1618,6 +1602,11 @@ func TestManagedMachinePoolUpdate(t *testing.T) {
NodeSourceViaImage: &infrav2exp.NodeSourceViaImage{
ImageId: common.String("test-image-id"),
},
NodePoolCyclingDetails: &infrav2exp.NodePoolCyclingDetails{
IsNodeCyclingEnabled: common.Bool(true),
MaximumSurge: common.String("20%"),
MaximumUnavailable: common.String("10%"),
},
SshPublicKey: "test-ssh-public-key",
NodePoolNodeConfig: &infrav2exp.NodePoolNodeConfig{
PlacementConfigs: []infrav2exp.PlacementConfig{
@@ -1663,16 +1652,13 @@ func TestManagedMachinePoolUpdate(t *testing.T) {
NodeSourceDetails: &oke.NodeSourceViaImageDetails{
ImageId: common.String("test-image-id"),
},
NodePoolCyclingDetails: &oke.NodePoolCyclingDetails{
IsNodeCyclingEnabled: common.Bool(true),
MaximumSurge: common.String("20%"),
MaximumUnavailable: common.String("10%"),
},
SshPublicKey: common.String("test-ssh-public-key"),
NodeConfigDetails: &oke.UpdateNodePoolNodeConfigDetails{
PlacementConfigs: []oke.NodePoolPlacementConfigDetails{
{
AvailabilityDomain: common.String("test-ad"),
CapacityReservationId: common.String("cap-id"),
SubnetId: common.String("subnet-id"),
FaultDomains: []string{"fd-1", "fd-2"},
},
},
NsgIds: []string{"nsg-id"},
KmsKeyId: common.String("kms-key-id"),
IsPvEncryptionInTransitEnabled: common.Bool(true),
4 changes: 4 additions & 0 deletions cloud/scope/util.go
@@ -20,6 +20,10 @@ import (
infrastructurev1beta2 "github.com/oracle/cluster-api-provider-oci/api/v1beta2"
)

const (
LogLevelWarn = 3
)

// GetNsgNamesFromId returns the names of the NSGs with the provided IDs
func GetNsgNamesFromId(ids []string, nsgs []*infrastructurev1beta2.NSG) []string {
names := make([]string, 0)
@@ -339,6 +339,28 @@ spec:
description: NodeMetadata defines a list of key/value pairs to add
to each underlying OCI instance in the node pool on launch.
type: object
nodePoolCyclingDetails:
description: NodePoolCyclingDetails defines the node pool recycling
options.
properties:
isNodeCyclingEnabled:
description: IsNodeCyclingEnabled refers if nodes in the nodepool
will be cycled to have new changes.
type: boolean
maximumSurge:
description: MaximumSurge refers to the maximum additional new
compute instances that would be temporarily created and added
to nodepool during the cycling nodepool process. OKE supports
both integer and percentage input. Defaults to 1, Ranges from
0 to Nodepool size or 0% to 100%
type: string
maximumUnavailable:
description: Maximum active nodes that would be terminated from
nodepool during the cycling nodepool process. OKE supports both
integer and percentage input. Defaults to 0, Ranges from 0 to
Nodepool size or 0% to 100%
type: string
type: object
nodePoolNodeConfig:
description: NodePoolNodeConfig defines the configuration of nodes
in the node pool.
@@ -308,6 +308,28 @@ spec:
to add to each underlying OCI instance in the node pool
on launch.
type: object
nodePoolCyclingDetails:
description: NodePoolCyclingDetails defines the node pool
recycling options.
properties:
isNodeCyclingEnabled:
description: IsNodeCyclingEnabled refers if nodes in the
nodepool will be cycled to have new changes.
type: boolean
maximumSurge:
description: MaximumSurge refers to the maximum additional
new compute instances that would be temporarily created
and added to nodepool during the cycling nodepool process.
OKE supports both integer and percentage input. Defaults
to 1, Ranges from 0 to Nodepool size or 0% to 100%
type: string
maximumUnavailable:
description: Maximum active nodes that would be terminated
from nodepool during the cycling nodepool process. OKE
supports both integer and percentage input. Defaults
to 0, Ranges from 0 to Nodepool size or 0% to 100%
type: string
type: object
nodePoolNodeConfig:
description: NodePoolNodeConfig defines the configuration
of nodes in the node pool.
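The two CRD excerpts above document the new nodePoolCyclingDetails block. On the Go side it maps to OCIManagedMachinePoolSpec.NodePoolCyclingDetails, populated as in the updated unit test; a minimal sketch, with the exp API import path assumed and the constructor name purely illustrative:

```go
package scope

import (
	// Assumed import path for the provider's experimental v1beta2 API.
	infrav2exp "github.com/oracle/cluster-api-provider-oci/exp/api/v1beta2"
	"github.com/oracle/oci-go-sdk/v65/common"
)

// newCyclingSpec shows how a machine pool spec opts in to node recycling:
// cycle nodes when the pool changes, add at most 20% extra capacity while
// cycling, and keep no more than 10% of nodes unavailable. The values mirror
// the updated test fixture; other spec fields are omitted.
func newCyclingSpec() infrav2exp.OCIManagedMachinePoolSpec {
	return infrav2exp.OCIManagedMachinePoolSpec{
		NodePoolCyclingDetails: &infrav2exp.NodePoolCyclingDetails{
			IsNodeCyclingEnabled: common.Bool(true),
			MaximumSurge:         common.String("20%"),
			MaximumUnavailable:   common.String("10%"),
		},
	}
}
```

Per the CRD descriptions, both maximumSurge and maximumUnavailable accept either an integer count or a percentage of the node pool size.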
4 changes: 4 additions & 0 deletions exp/api/v1beta1/conversion.go
@@ -51,3 +51,7 @@ func Convert_v1beta2_NetworkDetails_To_v1beta1_NetworkDetails(in *infrastructure
func Convert_v1beta2_OCIManagedControlPlaneSpec_To_v1beta1_OCIManagedControlPlaneSpec(in *v1beta2.OCIManagedControlPlaneSpec, out *OCIManagedControlPlaneSpec, s conversion.Scope) error {
return autoConvert_v1beta2_OCIManagedControlPlaneSpec_To_v1beta1_OCIManagedControlPlaneSpec(in, out, s)
}

func Convert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(in *v1beta2.OCIManagedMachinePoolSpec, out *OCIManagedMachinePoolSpec, s conversion.Scope) error {
return autoConvert_v1beta2_OCIManagedMachinePoolSpec_To_v1beta1_OCIManagedMachinePoolSpec(in, out, s)
}
1 change: 1 addition & 0 deletions exp/api/v1beta1/ocimanagedmachinepool_conversion.go
@@ -34,6 +34,7 @@ func (src *OCIManagedMachinePool) ConvertTo(dstRaw conversion.Hub) error {
if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
return err
}
dst.Spec.NodePoolCyclingDetails = restored.Spec.NodePoolCyclingDetails

return nil
}
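The conversion changes above keep the new field from being dropped when a pool round-trips through v1beta1, which has no NodePoolCyclingDetails: the hand-written spec converter satisfies conversion-gen, and the added line in ConvertTo restores the field from the data Cluster API preserves on down-conversion. A toy round-trip with simplified local types (not the provider's real types or annotation mechanism) to illustrate why the explicit restore is needed:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-in for the provider type; field and JSON names follow the PR.
type NodePoolCyclingDetails struct {
	IsNodeCyclingEnabled *bool   `json:"isNodeCyclingEnabled,omitempty"`
	MaximumSurge         *string `json:"maximumSurge,omitempty"`
	MaximumUnavailable   *string `json:"maximumUnavailable,omitempty"`
}

// The v1beta2 spec carries the new field; the older v1beta1 spec has no place for it.
type specV1Beta2 struct {
	NodePoolCyclingDetails *NodePoolCyclingDetails `json:"nodePoolCyclingDetails,omitempty"`
}

func main() {
	enabled := true
	src := specV1Beta2{NodePoolCyclingDetails: &NodePoolCyclingDetails{IsNodeCyclingEnabled: &enabled}}

	// Down-conversion (v1beta2 -> v1beta1) cannot represent the field, so the full
	// source spec is preserved as serialized data; in Cluster API this is what
	// utilconversion.MarshalData stores in an annotation.
	preserved, _ := json.Marshal(src)

	// Up-conversion (v1beta1 -> v1beta2) unmarshals the preserved data and copies
	// the otherwise-lost field back onto the destination, which is what the added
	// line in ConvertTo does for NodePoolCyclingDetails.
	var restored specV1Beta2
	_ = json.Unmarshal(preserved, &restored)

	dst := specV1Beta2{}
	dst.NodePoolCyclingDetails = restored.NodePoolCyclingDetails
	fmt.Println(*dst.NodePoolCyclingDetails.IsNodeCyclingEnabled) // prints: true
}
```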