From 7b1951c559b8b413b7f4f017262879f76e8a55cb Mon Sep 17 00:00:00 2001 From: Jont828 Date: Mon, 12 Jun 2023 18:27:15 -0400 Subject: [PATCH] Add MachinePool Machine implementation to DockerMachines and DockerMachinePools --- .../docker/api/v1alpha3/conversion.go | 26 ++- .../api/v1alpha3/zz_generated.conversion.go | 16 +- .../docker/api/v1alpha4/conversion.go | 26 ++- .../api/v1alpha4/zz_generated.conversion.go | 16 +- .../docker/api/v1beta1/dockermachine_types.go | 7 + ...e.cluster.x-k8s.io_dockermachinepools.yaml | 50 +++++ ...cture.cluster.x-k8s.io_dockermachines.yaml | 7 + ...uster.x-k8s.io_dockermachinetemplates.yaml | 8 + .../docker/exp/api/v1alpha3/conversion.go | 29 ++- .../api/v1alpha3/zz_generated.conversion.go | 9 +- .../docker/exp/api/v1alpha4/conversion.go | 29 ++- .../api/v1alpha4/zz_generated.conversion.go | 9 +- .../api/v1beta1/dockermachinepool_types.go | 20 +- .../exp/api/v1beta1/zz_generated.deepcopy.go | 1 + .../dockermachinepool_controller.go | 210 +++++++++++++++++- .../docker/exp/internal/docker/nodepool.go | 85 ++++--- .../controllers/dockermachine_controller.go | 152 +++++++------ 17 files changed, 559 insertions(+), 141 deletions(-) diff --git a/test/infrastructure/docker/api/v1alpha3/conversion.go b/test/infrastructure/docker/api/v1alpha3/conversion.go index 6bdb64825187..e8962dee77c5 100644 --- a/test/infrastructure/docker/api/v1alpha3/conversion.go +++ b/test/infrastructure/docker/api/v1alpha3/conversion.go @@ -78,13 +78,30 @@ func (dst *DockerClusterList) ConvertFrom(srcRaw conversion.Hub) error { func (src *DockerMachine) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infrav1.DockerMachine) - return Convert_v1alpha3_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil) + if err := Convert_v1alpha3_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &infrav1.DockerMachine{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + + dst.Spec.InstanceName = restored.Spec.InstanceName + + return nil } func (dst *DockerMachine) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*infrav1.DockerMachine) - return Convert_v1beta1_DockerMachine_To_v1alpha3_DockerMachine(src, dst, nil) + if err := Convert_v1beta1_DockerMachine_To_v1alpha3_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata + return utilconversion.MarshalData(src, dst) } func (src *DockerMachineList) ConvertTo(dstRaw conversion.Hub) error { @@ -113,6 +130,7 @@ func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta + dst.Spec.Template.Spec.InstanceName = restored.Spec.Template.Spec.InstanceName return nil } @@ -154,3 +172,7 @@ func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemp // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. 
return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(in, out, s) } + +func Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *infrav1.DockerMachineSpec, out *DockerMachineSpec, s apiconversion.Scope) error { + return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in, out, s) +} diff --git a/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go b/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go index d86ac186fff9..363a145af4d1 100644 --- a/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go +++ b/test/infrastructure/docker/api/v1alpha3/zz_generated.conversion.go @@ -108,11 +108,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*DockerMachineStatus)(nil), (*v1beta1.DockerMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha3_DockerMachineStatus_To_v1beta1_DockerMachineStatus(a.(*DockerMachineStatus), b.(*v1beta1.DockerMachineStatus), scope) }); err != nil { @@ -173,6 +168,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta1.DockerMachineTemplateResource)(nil), (*DockerMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha3_DockerMachineTemplateResource(a.(*v1beta1.DockerMachineTemplateResource), b.(*DockerMachineTemplateResource), scope) }); err != nil { @@ -480,6 +480,7 @@ func Convert_v1alpha3_DockerMachineSpec_To_v1beta1_DockerMachineSpec(in *DockerM } func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { + // WARNING: in.InstanceName requires manual conversion: does not exist in peer-type out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.CustomImage = in.CustomImage out.PreLoadImages = *(*[]string)(unsafe.Pointer(&in.PreLoadImages)) @@ -488,11 +489,6 @@ func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *v1b return nil } -// Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec is an autogenerated conversion function. 
-func Convert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha3_DockerMachineSpec(in, out, s) -} - func autoConvert_v1alpha3_DockerMachineStatus_To_v1beta1_DockerMachineStatus(in *DockerMachineStatus, out *v1beta1.DockerMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.LoadBalancerConfigured = in.LoadBalancerConfigured diff --git a/test/infrastructure/docker/api/v1alpha4/conversion.go b/test/infrastructure/docker/api/v1alpha4/conversion.go index a1b42d8f4d53..14e1324600df 100644 --- a/test/infrastructure/docker/api/v1alpha4/conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/conversion.go @@ -96,13 +96,30 @@ func (dst *DockerClusterTemplateList) ConvertFrom(srcRaw conversion.Hub) error { func (src *DockerMachine) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infrav1.DockerMachine) - return Convert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil) + if err := Convert_v1alpha4_DockerMachine_To_v1beta1_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Manually restore data. + restored := &infrav1.DockerMachine{} + if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok { + return err + } + + dst.Spec.InstanceName = restored.Spec.InstanceName + + return nil } func (dst *DockerMachine) ConvertFrom(srcRaw conversion.Hub) error { src := srcRaw.(*infrav1.DockerMachine) - return Convert_v1beta1_DockerMachine_To_v1alpha4_DockerMachine(src, dst, nil) + if err := Convert_v1beta1_DockerMachine_To_v1alpha4_DockerMachine(src, dst, nil); err != nil { + return err + } + + // Preserve Hub data on down-conversion except for metadata + return utilconversion.MarshalData(src, dst) } func (src *DockerMachineList) ConvertTo(dstRaw conversion.Hub) error { @@ -131,6 +148,7 @@ func (src *DockerMachineTemplate) ConvertTo(dstRaw conversion.Hub) error { } dst.Spec.Template.ObjectMeta = restored.Spec.Template.ObjectMeta + dst.Spec.Template.Spec.InstanceName = restored.Spec.Template.Spec.InstanceName return nil } @@ -171,3 +189,7 @@ func Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemp // NOTE: custom conversion func is required because spec.template.metadata has been added in v1beta1. 
return autoConvert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(in, out, s) } + +func Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *infrav1.DockerMachineSpec, out *DockerMachineSpec, s apiconversion.Scope) error { + return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in, out, s) +} diff --git a/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go b/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go index 9594641da789..650dcc92a355 100644 --- a/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go +++ b/test/infrastructure/docker/api/v1alpha4/zz_generated.conversion.go @@ -158,11 +158,6 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } - if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*DockerMachineStatus)(nil), (*v1beta1.DockerMachineStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha4_DockerMachineStatus_To_v1beta1_DockerMachineStatus(a.(*DockerMachineStatus), b.(*v1beta1.DockerMachineStatus), scope) }); err != nil { @@ -233,6 +228,11 @@ func RegisterConversions(s *runtime.Scheme) error { }); err != nil { return err } + if err := s.AddConversionFunc((*v1beta1.DockerMachineSpec)(nil), (*DockerMachineSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(a.(*v1beta1.DockerMachineSpec), b.(*DockerMachineSpec), scope) + }); err != nil { + return err + } if err := s.AddConversionFunc((*v1beta1.DockerMachineTemplateResource)(nil), (*DockerMachineTemplateResource)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1beta1_DockerMachineTemplateResource_To_v1alpha4_DockerMachineTemplateResource(a.(*v1beta1.DockerMachineTemplateResource), b.(*DockerMachineTemplateResource), scope) }); err != nil { @@ -686,6 +686,7 @@ func Convert_v1alpha4_DockerMachineSpec_To_v1beta1_DockerMachineSpec(in *DockerM } func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { + // WARNING: in.InstanceName requires manual conversion: does not exist in peer-type out.ProviderID = (*string)(unsafe.Pointer(in.ProviderID)) out.CustomImage = in.CustomImage out.PreLoadImages = *(*[]string)(unsafe.Pointer(&in.PreLoadImages)) @@ -694,11 +695,6 @@ func autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *v1b return nil } -// Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec is an autogenerated conversion function. 
-func Convert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in *v1beta1.DockerMachineSpec, out *DockerMachineSpec, s conversion.Scope) error { - return autoConvert_v1beta1_DockerMachineSpec_To_v1alpha4_DockerMachineSpec(in, out, s) -} - func autoConvert_v1alpha4_DockerMachineStatus_To_v1beta1_DockerMachineStatus(in *DockerMachineStatus, out *v1beta1.DockerMachineStatus, s conversion.Scope) error { out.Ready = in.Ready out.LoadBalancerConfigured = in.LoadBalancerConfigured diff --git a/test/infrastructure/docker/api/v1beta1/dockermachine_types.go b/test/infrastructure/docker/api/v1beta1/dockermachine_types.go index d99e30be5455..f798d56d73cc 100644 --- a/test/infrastructure/docker/api/v1beta1/dockermachine_types.go +++ b/test/infrastructure/docker/api/v1beta1/dockermachine_types.go @@ -30,6 +30,13 @@ const ( // DockerMachineSpec defines the desired state of DockerMachine. type DockerMachineSpec struct { + // InstanceName indicates the name of the Docker container associated with this DockerMachine. + // Since the provider ID is not set until after the container is online, this field is used to + // maintain the association. If it is not populated, the name of the container will be the same + // as the name of the owner Machine. + // +optional + InstanceName string `json:"instanceName,omitempty"` + // ProviderID will be the container name in ProviderID format (docker:////) // +optional ProviderID *string `json:"providerID,omitempty"` diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml index c8b0e92675c3..47500437dff7 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinepools.yaml @@ -503,6 +503,56 @@ spec: - type type: object type: array + infrastructureMachineKind: + description: InfrastructureMachineKind is the kind of the infrastructure + resources behind MachinePool Machines. + type: string + infrastructureMachineSelector: + description: 'InfrastructureMachineSelector is a label query over + the infrastructure resources behind MachinePool Machines. More info: + https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors' + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. + The requirements are ANDed. + items: + description: A label selector requirement is a selector that + contains values, a key, and an operator that relates the key + and values. + properties: + key: + description: key is the label key that the selector applies + to. + type: string + operator: + description: operator represents a key's relationship to + a set of values. Valid operators are In, NotIn, Exists + and DoesNotExist. + type: string + values: + description: values is an array of string values. If the + operator is In or NotIn, the values array must be non-empty. + If the operator is Exists or DoesNotExist, the values + array must be empty. This array is replaced during a strategic + merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. 
A single + {key,value} in the matchLabels map is equivalent to an element + of matchExpressions, whose key field is "key", the operator + is "In", and the values array contains only "value". The requirements + are ANDed. + type: object + type: object instances: description: Instances contains the status for each instance in the pool diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml index 5dc540fed23a..bfbda562e6da 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachines.yaml @@ -379,6 +379,13 @@ spec: type: boolean type: object type: array + instanceName: + description: InstanceName indicates the name of the Docker container + associated with this DockerMachine. Since the provider ID is not + set until after the container is online, this field is used to maintain + the association. If it is not populated, the name of the container + will be the same as the name of the owner Machine. + type: string preLoadImages: description: PreLoadImages allows to pre-load images in a newly created machine. This can be used to speed up tests by avoiding e.g. to diff --git a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml index bf9afa14a380..8e918450e07b 100644 --- a/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml +++ b/test/infrastructure/docker/config/crd/bases/infrastructure.cluster.x-k8s.io_dockermachinetemplates.yaml @@ -274,6 +274,14 @@ spec: type: boolean type: object type: array + instanceName: + description: InstanceName indicates the name of the Docker + container associated with this DockerMachine. Since the + provider ID is not set until after the container is online, + this field is used to maintain the association. If it is + not populated, the name of the container will be the same + as the name of the owner Machine. + type: string preLoadImages: description: PreLoadImages allows to pre-load images in a newly created machine. This can be used to speed up tests diff --git a/test/infrastructure/docker/exp/api/v1alpha3/conversion.go b/test/infrastructure/docker/exp/api/v1alpha3/conversion.go index 3eeb15387b8b..9816990b0c48 100644 --- a/test/infrastructure/docker/exp/api/v1alpha3/conversion.go +++ b/test/infrastructure/docker/exp/api/v1alpha3/conversion.go @@ -17,21 +17,41 @@ limitations under the License. package v1alpha3 import ( + apiconversion "k8s.io/apimachinery/pkg/conversion" "sigs.k8s.io/controller-runtime/pkg/conversion" infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" + utilconversion "sigs.k8s.io/cluster-api/util/conversion" ) func (src *DockerMachinePool) ConvertTo(dstRaw conversion.Hub) error { dst := dstRaw.(*infraexpv1.DockerMachinePool) - return Convert_v1alpha3_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil) + if err := Convert_v1alpha3_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil); err != nil { + return err + } + + // Manually restore data. 
+	restored := &infraexpv1.DockerMachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	dst.Status.InfrastructureMachineKind = restored.Status.InfrastructureMachineKind
+	dst.Status.InfrastructureMachineSelector = restored.Status.InfrastructureMachineSelector
+
+	return nil
 }
 
 func (dst *DockerMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*infraexpv1.DockerMachinePool)
 
-	return Convert_v1beta1_DockerMachinePool_To_v1alpha3_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1beta1_DockerMachinePool_To_v1alpha3_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata
+	return utilconversion.MarshalData(src, dst)
 }
 
 func (src *DockerMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
@@ -45,3 +65,8 @@ func (dst *DockerMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
 
 	return Convert_v1beta1_DockerMachinePoolList_To_v1alpha3_DockerMachinePoolList(src, dst, nil)
 }
+
+func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in *infraexpv1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s apiconversion.Scope) error {
+	// NOTE: custom conversion func is required because status.infrastructureMachineSelector and status.infrastructureMachineKind have been added in v1beta1.
+	return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in, out, s)
+}
diff --git a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go
index 311ad6ada301..2d140d5072c9 100644
--- a/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha3/zz_generated.conversion.go
@@ -95,7 +95,7 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+	if err := s.AddConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
@@ -339,10 +339,7 @@ func autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolSt
 	} else {
 		out.Conditions = nil
 	}
+	// WARNING: in.InfrastructureMachineSelector requires manual conversion: does not exist in peer-type
+	// WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type
 	return nil
 }
-
-// Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in *v1beta1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s conversion.Scope) error {
-	return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha3_DockerMachinePoolStatus(in, out, s)
-}
diff --git a/test/infrastructure/docker/exp/api/v1alpha4/conversion.go b/test/infrastructure/docker/exp/api/v1alpha4/conversion.go
index acc200d9e51a..90a7cebadb20 100644
--- a/test/infrastructure/docker/exp/api/v1alpha4/conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha4/conversion.go
@@ -17,21 +17,41 @@ limitations under the License.
 package v1alpha4
 
 import (
+	apiconversion "k8s.io/apimachinery/pkg/conversion"
 	"sigs.k8s.io/controller-runtime/pkg/conversion"
 
 	infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1"
+	utilconversion "sigs.k8s.io/cluster-api/util/conversion"
 )
 
 func (src *DockerMachinePool) ConvertTo(dstRaw conversion.Hub) error {
 	dst := dstRaw.(*infraexpv1.DockerMachinePool)
 
-	return Convert_v1alpha4_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1alpha4_DockerMachinePool_To_v1beta1_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Manually restore data.
+	restored := &infraexpv1.DockerMachinePool{}
+	if ok, err := utilconversion.UnmarshalData(src, restored); err != nil || !ok {
+		return err
+	}
+
+	dst.Status.InfrastructureMachineKind = restored.Status.InfrastructureMachineKind
+	dst.Status.InfrastructureMachineSelector = restored.Status.InfrastructureMachineSelector
+
+	return nil
 }
 
 func (dst *DockerMachinePool) ConvertFrom(srcRaw conversion.Hub) error {
 	src := srcRaw.(*infraexpv1.DockerMachinePool)
 
-	return Convert_v1beta1_DockerMachinePool_To_v1alpha4_DockerMachinePool(src, dst, nil)
+	if err := Convert_v1beta1_DockerMachinePool_To_v1alpha4_DockerMachinePool(src, dst, nil); err != nil {
+		return err
+	}
+
+	// Preserve Hub data on down-conversion except for metadata
+	return utilconversion.MarshalData(src, dst)
 }
 
 func (src *DockerMachinePoolList) ConvertTo(dstRaw conversion.Hub) error {
@@ -45,3 +65,8 @@ func (dst *DockerMachinePoolList) ConvertFrom(srcRaw conversion.Hub) error {
 
 	return Convert_v1beta1_DockerMachinePoolList_To_v1alpha4_DockerMachinePoolList(src, dst, nil)
 }
+
+func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in *infraexpv1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s apiconversion.Scope) error {
+	// NOTE: custom conversion func is required because status.infrastructureMachineSelector and status.infrastructureMachineKind have been added in v1beta1.
+	return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in, out, s)
+}
diff --git a/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go b/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
index 4f302088997d..612a615db8a5 100644
--- a/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
+++ b/test/infrastructure/docker/exp/api/v1alpha4/zz_generated.conversion.go
@@ -95,7 +95,7 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
-	if err := s.AddGeneratedConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
+	if err := s.AddConversionFunc((*v1beta1.DockerMachinePoolStatus)(nil), (*DockerMachinePoolStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
 		return Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(a.(*v1beta1.DockerMachinePoolStatus), b.(*DockerMachinePoolStatus), scope)
 	}); err != nil {
 		return err
@@ -339,10 +339,7 @@ func autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolSt
 	} else {
 		out.Conditions = nil
 	}
+	// WARNING: in.InfrastructureMachineSelector requires manual conversion: does not exist in peer-type
+	// WARNING: in.InfrastructureMachineKind requires manual conversion: does not exist in peer-type
 	return nil
 }
-
-// Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus is an autogenerated conversion function.
-func Convert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in *v1beta1.DockerMachinePoolStatus, out *DockerMachinePoolStatus, s conversion.Scope) error {
-	return autoConvert_v1beta1_DockerMachinePoolStatus_To_v1alpha4_DockerMachinePoolStatus(in, out, s)
-}
diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go
index 273475366b4b..055243311431 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepool_types.go
@@ -26,6 +26,9 @@ import (
 const (
 	// MachinePoolFinalizer allows ReconcileDockerMachinePool to clean up resources.
 	MachinePoolFinalizer = "dockermachinepool.infrastructure.cluster.x-k8s.io"
+
+	// DockerMachinePoolNameLabel is the label indicating the name of the DockerMachinePool a DockerMachine is controlled by.
+	DockerMachinePoolNameLabel = "dockermachinepool.infrastructure.cluster.x-k8s.io/pool-name"
 )
 
 // DockerMachinePoolMachineTemplate defines the desired state of DockerMachine.
@@ -82,6 +85,15 @@ type DockerMachinePoolStatus struct {
 	// Conditions defines current service state of the DockerMachinePool.
 	// +optional
 	Conditions clusterv1.Conditions `json:"conditions,omitempty"`
+
+	// InfrastructureMachineSelector is a label query over the infrastructure resources behind MachinePool Machines.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	// +optional
+	InfrastructureMachineSelector metav1.LabelSelector `json:"infrastructureMachineSelector,omitempty"`
+
+	// InfrastructureMachineKind is the kind of the infrastructure resources behind MachinePool Machines.
+	// +optional
+	InfrastructureMachineKind string `json:"infrastructureMachineKind,omitempty"`
 }
 
 // DockerMachinePoolInstanceStatus contains status information about a DockerMachinePool.
@@ -130,13 +142,13 @@ type DockerMachinePool struct {
 }
 
 // GetConditions returns the set of conditions for this object.
-func (c *DockerMachinePool) GetConditions() clusterv1.Conditions {
-	return c.Status.Conditions
+func (d *DockerMachinePool) GetConditions() clusterv1.Conditions {
+	return d.Status.Conditions
 }
 
 // SetConditions sets the conditions on this object.
-func (c *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) {
-	c.Status.Conditions = conditions
+func (d *DockerMachinePool) SetConditions(conditions clusterv1.Conditions) {
+	d.Status.Conditions = conditions
 }
 
 // +kubebuilder:object:root=true
diff --git a/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go b/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
index cf1e613a00d1..8a92065572e9 100644
--- a/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
+++ b/test/infrastructure/docker/exp/api/v1beta1/zz_generated.deepcopy.go
@@ -179,6 +179,7 @@ func (in *DockerMachinePoolStatus) DeepCopyInto(out *DockerMachinePoolStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	in.InfrastructureMachineSelector.DeepCopyInto(&out.InfrastructureMachineSelector)
 }
 
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DockerMachinePoolStatus.
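Reviewer note (outside the patch): every ConvertTo/ConvertFrom updated above uses the same annotation-based round-trip pattern Cluster API applies to lossy version conversion. ConvertFrom stashes the whole hub object in an annotation via utilconversion.MarshalData, and ConvertTo restores the fields the older API version cannot represent (spec.instanceName on DockerMachine; status.infrastructureMachineSelector and status.infrastructureMachineKind on DockerMachinePool) via utilconversion.UnmarshalData. The sketch below models only the mechanism: hubObj/spokeObj are hypothetical stand-ins, plain encoding/json replaces the real helpers, and the annotation key is assumed to mirror the one used by sigs.k8s.io/cluster-api/util/conversion.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// dataAnnotation is assumed to mirror the annotation key used by
// sigs.k8s.io/cluster-api/util/conversion.
const dataAnnotation = "cluster.x-k8s.io/conversion-data"

// hubObj stands in for the v1beta1 (hub) type; InstanceName does not
// exist in the older spoke versions.
type hubObj struct {
	ProviderID   string `json:"providerID,omitempty"`
	InstanceName string `json:"instanceName,omitempty"`
}

// spokeObj stands in for v1alpha3/v1alpha4: it only knows ProviderID,
// but carries annotations where hub data can be stashed.
type spokeObj struct {
	ProviderID  string
	Annotations map[string]string
}

// marshalData emulates utilconversion.MarshalData: store the full hub
// object as JSON in an annotation on the spoke object.
func marshalData(src hubObj, dst *spokeObj) error {
	b, err := json.Marshal(src)
	if err != nil {
		return err
	}
	if dst.Annotations == nil {
		dst.Annotations = map[string]string{}
	}
	dst.Annotations[dataAnnotation] = string(b)
	return nil
}

// unmarshalData emulates utilconversion.UnmarshalData: restore the hub
// object from the annotation, reporting whether any data was present.
func unmarshalData(src spokeObj, dst *hubObj) (bool, error) {
	raw, ok := src.Annotations[dataAnnotation]
	if !ok {
		return false, nil
	}
	return true, json.Unmarshal([]byte(raw), dst)
}

func main() {
	// Down-convert: hub -> spoke loses InstanceName, so stash the hub object.
	hub := hubObj{ProviderID: "docker:////worker-0", InstanceName: "worker-0"}
	spoke := spokeObj{ProviderID: hub.ProviderID}
	if err := marshalData(hub, &spoke); err != nil {
		panic(err)
	}

	// Up-convert: spoke -> hub, then restore the lossy fields.
	roundTripped := hubObj{ProviderID: spoke.ProviderID}
	restored := hubObj{}
	if ok, err := unmarshalData(spoke, &restored); err == nil && ok {
		roundTripped.InstanceName = restored.InstanceName
	}
	fmt.Println(roundTripped.InstanceName) // worker-0
}
```

Because the stash lives in object metadata, it survives being stored under the old API version and lets hub → spoke → hub round-trips (as exercised by conversion fuzz tests) reproduce the original object without special cases.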
diff --git a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go index e7836e13c45a..4b9fd0b13b7b 100644 --- a/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go +++ b/test/infrastructure/docker/exp/internal/controllers/dockermachinepool_controller.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" "k8s.io/utils/pointer" @@ -39,9 +40,11 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" utilexp "sigs.k8s.io/cluster-api/exp/util" "sigs.k8s.io/cluster-api/test/infrastructure/container" + infrav1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" infraexpv1 "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/api/v1beta1" "sigs.k8s.io/cluster-api/test/infrastructure/docker/exp/internal/docker" "sigs.k8s.io/cluster-api/util" + "sigs.k8s.io/cluster-api/util/conditions" "sigs.k8s.io/cluster-api/util/patch" "sigs.k8s.io/cluster-api/util/predicates" ) @@ -109,6 +112,18 @@ func (r *DockerMachinePoolReconciler) Reconcile(ctx context.Context, req ctrl.Re return ctrl.Result{}, err } + dockerMachinePool.Status.InfrastructureMachineSelector = metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterNameLabel: cluster.Name, + infraexpv1.DockerMachinePoolNameLabel: dockerMachinePool.Name, + }, + } + dockerMachinePool.Status.InfrastructureMachineKind = "DockerMachine" + // Patch now so that the status and selectors are available. + if err := patchHelper.Patch(ctx, dockerMachinePool); err != nil { + return ctrl.Result{}, err + } + // Always attempt to Patch the DockerMachinePool object and status after each reconciliation. defer func() { if err := patchDockerMachinePool(ctx, patchHelper, dockerMachinePool); err != nil { @@ -151,7 +166,7 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr err = ctrl.NewControllerManagedBy(mgr). For(&infraexpv1.DockerMachinePool{}). WithOptions(options). - WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)). + WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))). 
 		Watches(
 			&expv1.MachinePool{},
 			handler.EnqueueRequestsFromMapFunc(utilexp.MachinePoolToInfrastructureMapFunc(
@@ -171,7 +186,40 @@ func (r *DockerMachinePoolReconciler) SetupWithManager(ctx context.Context, mgr
 }
 
 func (r *DockerMachinePoolReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool) (ctrl.Result, error) {
-	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool)
+	log := ctrl.LoggerFrom(ctx)
+
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Note: append instead of assigning by index so that DockerMachines skipped below
+	// don't leave zero-valued entries in the slice.
+	nodePoolInstances := make([]docker.NodePoolInstance, 0, len(dockerMachineList.Items))
+	for _, dockerMachine := range dockerMachineList.Items {
+		machine, err := util.GetOwnerMachine(ctx, r.Client, dockerMachine.ObjectMeta)
+		if err != nil {
+			log.V(2).Error(err, "Failed to get owner machine, skipping")
+			continue
+		}
+		if machine == nil {
+			log.V(2).Info("Owner machine not found, skipping", "docker-machine", dockerMachine.Name)
+			continue
+		}
+
+		hasDeleteAnnotation := false
+		if machine.Annotations != nil {
+			_, hasDeleteAnnotation = machine.Annotations[clusterv1.DeleteMachineAnnotation]
+		}
+
+		nodePoolInstances = append(nodePoolInstances, docker.NodePoolInstance{
+			InstanceName:     dockerMachine.Spec.InstanceName,
+			Bootstrapped:     dockerMachine.Spec.Bootstrapped,
+			ProviderID:       dockerMachine.Spec.ProviderID,
+			PrioritizeDelete: hasDeleteAnnotation,
+			Addresses:        dockerMachine.Status.Addresses,
+		})
+	}
+
+	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool, nodePoolInstances)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool")
 	}
@@ -197,7 +245,32 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 		machinePool.Spec.Replicas = pointer.Int32(1)
 	}
 
-	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool)
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return ctrl.Result{}, err
+	}
+
+	// Since node pools don't persist the instances list, we need to construct it from the list of DockerMachines.
+	nodePoolInstances := make([]docker.NodePoolInstance, len(dockerMachineList.Items))
+	for i, dockerMachine := range dockerMachineList.Items {
+		machine, err := util.GetOwnerMachine(ctx, r.Client, dockerMachine.ObjectMeta)
+		if err != nil {
+			log.V(2).Error(err, "Failed to get owner machine")
+		}
+		var hasDeleteAnnotation bool
+		if machine != nil {
+			_, hasDeleteAnnotation = machine.Annotations[clusterv1.DeleteMachineAnnotation]
+		}
+
+		nodePoolInstances[i] = docker.NodePoolInstance{
+			InstanceName:     dockerMachine.Spec.InstanceName,
+			Bootstrapped:     dockerMachine.Spec.Bootstrapped,
+			ProviderID:       dockerMachine.Spec.ProviderID,
+			PrioritizeDelete: hasDeleteAnnotation,
+		}
+	}
+
+	pool, err := docker.NewNodePool(ctx, r.Client, cluster, machinePool, dockerMachinePool, nodePoolInstances)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to build new node pool")
 	}
@@ -207,20 +280,35 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "failed to generate workload cluster client")
 	}
+
 	res, err := pool.ReconcileMachines(ctx, remoteClient)
 	if err != nil {
 		return res, err
 	}
 
+	nodePoolInstancesResult := pool.GetNodePoolInstances()
+
 	// Derive info from Status.Instances
 	dockerMachinePool.Spec.ProviderIDList = []string{}
-	for _, instance := range dockerMachinePool.Status.Instances {
-		if instance.ProviderID != nil && instance.Ready {
+	for _, instance := range nodePoolInstancesResult {
+		if instance.ProviderID != nil {
 			dockerMachinePool.Spec.ProviderIDList = append(dockerMachinePool.Spec.ProviderIDList, *instance.ProviderID)
 		}
 	}
 
-	dockerMachinePool.Status.Replicas = int32(len(dockerMachinePool.Status.Instances))
+	// Delete all DockerMachines that are not in the list of instances returned by the node pool.
+	if err := r.DeleteDanglingDockerMachines(ctx, dockerMachinePool, nodePoolInstancesResult); err != nil {
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ReadyCondition, "FailedToDeleteOrphanedMachines", clusterv1.ConditionSeverityWarning, err.Error())
+		return ctrl.Result{}, errors.Wrap(err, "failed to delete orphaned machines")
+	}
+
+	// Create a DockerMachine for each instance returned by the node pool if it doesn't exist.
+	if err := r.CreateDockerMachinesIfNotExists(ctx, machinePool, dockerMachinePool, nodePoolInstancesResult); err != nil {
+		conditions.MarkFalse(dockerMachinePool, clusterv1.ReadyCondition, "FailedToCreateNewMachines", clusterv1.ConditionSeverityWarning, err.Error())
+		return ctrl.Result{}, errors.Wrap(err, "failed to create missing machines")
+	}
+
+	dockerMachinePool.Status.Replicas = int32(len(dockerMachineList.Items))
 
 	if dockerMachinePool.Spec.ProviderID == "" {
 		// This is a fake provider ID which does not tie back to any docker infrastructure. In cloud providers,
@@ -229,25 +317,129 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
 		dockerMachinePool.Spec.ProviderID = getDockerMachinePoolProviderID(cluster.Name, dockerMachinePool.Name)
 	}
 
-	dockerMachinePool.Status.Ready = len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas)
+	if len(dockerMachinePool.Spec.ProviderIDList) == int(*machinePool.Spec.Replicas) {
+		dockerMachinePool.Status.Ready = true
+		conditions.MarkTrue(dockerMachinePool, expv1.ReplicasReadyCondition)
+
+		return ctrl.Result{}, nil
+	}
+
+	dockerMachinePool.Status.Ready = false
+	conditions.MarkFalse(dockerMachinePool, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")
 
 	// if some machine is still provisioning, force reconcile in few seconds to check again infrastructure.
-	if !dockerMachinePool.Status.Ready && res.IsZero() {
+	if res.IsZero() {
 		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
 	}
+
 	return res, nil
 }
 
+func getDockerMachines(ctx context.Context, c client.Client, dockerMachinePool infraexpv1.DockerMachinePool) (*infrav1.DockerMachineList, error) {
+	dockerMachineList := &infrav1.DockerMachineList{}
+	labels := dockerMachinePool.Status.InfrastructureMachineSelector.MatchLabels
+	if err := c.List(ctx, dockerMachineList, client.InNamespace(dockerMachinePool.Namespace), client.MatchingLabels(labels)); err != nil {
+		return nil, err
+	}
+
+	return dockerMachineList, nil
+}
+
+// DeleteDanglingDockerMachines deletes any DockerMachines owned by the DockerMachinePool whose instance name is no longer present in the latest list of node pool instances.
+func (r *DockerMachinePoolReconciler) DeleteDanglingDockerMachines(ctx context.Context, dockerMachinePool *infraexpv1.DockerMachinePool, instances []docker.NodePoolInstance) error {
+	log := ctrl.LoggerFrom(ctx)
+	log.V(2).Info("Deleting orphaned machines", "dockerMachinePool", dockerMachinePool.Name, "namespace", dockerMachinePool.Namespace, "instances", instances)
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return err
+	}
+
+	log.V(2).Info("DockerMachineList kind is", "kind", dockerMachineList.GetObjectKind())
+
+	instanceNameSet := map[string]struct{}{}
+	for _, instance := range instances {
+		instanceNameSet[instance.InstanceName] = struct{}{}
+	}
+
+	for i := range dockerMachineList.Items {
+		dockerMachine := &dockerMachineList.Items[i]
+		if _, ok := instanceNameSet[dockerMachine.Spec.InstanceName]; !ok {
+			log.V(2).Info("Deleting orphaned DockerMachine", "dockerMachine", dockerMachine.Name, "namespace", dockerMachine.Namespace)
+			if err := r.Client.Delete(ctx, dockerMachine); err != nil {
+				return err
+			}
+		} else {
+			log.V(2).Info("Keeping DockerMachine, nothing to do", "dockerMachine", dockerMachine.Name, "namespace", dockerMachine.Namespace)
+		}
+	}
+
+	return nil
+}
+
+// CreateDockerMachinesIfNotExists creates a DockerMachine for each instance returned by the node pool if it doesn't exist.
+func (r *DockerMachinePoolReconciler) CreateDockerMachinesIfNotExists(ctx context.Context, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool, instances []docker.NodePoolInstance) error {
+	log := ctrl.LoggerFrom(ctx)
+
+	log.V(2).Info("Creating missing machines", "dockerMachinePool", dockerMachinePool.Name, "namespace", dockerMachinePool.Namespace, "instances", instances)
+
+	dockerMachineList, err := getDockerMachines(ctx, r.Client, *dockerMachinePool)
+	if err != nil {
+		return err
+	}
+
+	instanceNameToDockerMachine := make(map[string]infrav1.DockerMachine)
+	for _, dockerMachine := range dockerMachineList.Items {
+		instanceNameToDockerMachine[dockerMachine.Spec.InstanceName] = dockerMachine
+	}
+
+	for _, instance := range instances {
+		if _, exists := instanceNameToDockerMachine[instance.InstanceName]; exists {
+			continue
+		}
+
+		// Note: copy the selector labels instead of mutating the map held by status.InfrastructureMachineSelector.
+		labels := make(map[string]string, len(dockerMachinePool.Status.InfrastructureMachineSelector.MatchLabels)+1)
+		for k, v := range dockerMachinePool.Status.InfrastructureMachineSelector.MatchLabels {
+			labels[k] = v
+		}
+		labels[clusterv1.MachinePoolNameLabel] = machinePool.Name
+		dockerMachine := &infrav1.DockerMachine{
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace:    dockerMachinePool.Namespace,
+				GenerateName: fmt.Sprintf("%s-", dockerMachinePool.Name),
+				Labels:       labels,
+				Annotations:  make(map[string]string),
+				// Note: This DockerMachine will be owned by the DockerMachinePool until the MachinePool controller creates its parent Machine.
			},
+			Spec: infrav1.DockerMachineSpec{
+				InstanceName: instance.InstanceName,
+			},
+		}
+
+		log.V(2).Info("Instance name for dockerMachine is", "instanceName", instance.InstanceName, "dockerMachine", dockerMachine.Name)
+
+		if err := r.Client.Create(ctx, dockerMachine); err != nil {
+			return errors.Wrap(err, "failed to create dockerMachine")
+		}
+	}
+
+	return nil
+}
+
 func getDockerMachinePoolProviderID(clusterName, dockerMachinePoolName string) string {
 	return fmt.Sprintf("docker:////%s-dmp-%s", clusterName, dockerMachinePoolName)
 }
 
 func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error {
-	// TODO: add conditions
+	conditions.SetSummary(dockerMachinePool,
+		conditions.WithConditions(
+			expv1.ReplicasReadyCondition,
+		),
+	)
 
 	// Patch the object, ignoring conflicts on the conditions owned by this controller.
 	return patchHelper.Patch(
 		ctx,
 		dockerMachinePool,
+		patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
+			clusterv1.ReadyCondition,
+			expv1.ReplicasReadyCondition,
+		}},
 	)
 }
diff --git a/test/infrastructure/docker/exp/internal/docker/nodepool.go b/test/infrastructure/docker/exp/internal/docker/nodepool.go
index ba99ac4e2a45..7e18798156ab 100644
--- a/test/infrastructure/docker/exp/internal/docker/nodepool.go
+++ b/test/infrastructure/docker/exp/internal/docker/nodepool.go
@@ -22,6 +22,7 @@ import (
 	"encoding/base64"
 	"fmt"
 	"math/rand"
+	"sort"
 	"strings"
 	"time"
 
@@ -55,16 +56,46 @@ type NodePool struct {
 	dockerMachinePool *infraexpv1.DockerMachinePool
 	labelFilters      map[string]string
 	machines          []*docker.Machine
+	nodePoolInstances []NodePoolInstance // Note: This must be initialized when creating a new node pool and updated to reflect the `machines` slice.
+}
+
+// NodePoolInstance is a representation of a node pool instance used to provide the information to construct DockerMachines.
+type NodePoolInstance struct { + InstanceName string + Bootstrapped bool + ProviderID *string + PrioritizeDelete bool + Addresses []clusterv1.MachineAddress + Ready bool +} + +func (np NodePool) Len() int { return len(np.machines) } +func (np NodePool) Swap(i, j int) { np.machines[i], np.machines[j] = np.machines[j], np.machines[i] } +func (np NodePool) Less(i, j int) bool { + var instanceI, instanceJ NodePoolInstance + for _, instance := range np.nodePoolInstances { + if instance.InstanceName == np.machines[i].Name() { + instanceI = instance + } + if instance.InstanceName == np.machines[j].Name() { + instanceJ = instance + } + } + if instanceI.PrioritizeDelete == instanceJ.PrioritizeDelete { + return instanceI.InstanceName < instanceJ.InstanceName + } + return instanceJ.PrioritizeDelete } // NewNodePool creates a new node pool instances. -func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *expv1.MachinePool, dmp *infraexpv1.DockerMachinePool) (*NodePool, error) { +func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluster, mp *expv1.MachinePool, dmp *infraexpv1.DockerMachinePool, nodePoolInstances []NodePoolInstance) (*NodePool, error) { np := &NodePool{ client: c, cluster: cluster, machinePool: mp, dockerMachinePool: dmp, labelFilters: map[string]string{dockerMachinePoolLabel: dmp.Name}, + nodePoolInstances: nodePoolInstances, } if err := np.refresh(ctx); err != nil { @@ -73,6 +104,11 @@ func NewNodePool(ctx context.Context, c client.Client, cluster *clusterv1.Cluste return np, nil } +// GetNodePoolInstances returns the node pool instances providing the information to construct DockerMachines. +func (np *NodePool) GetNodePoolInstances() []NodePoolInstance { + return np.nodePoolInstances +} + // ReconcileMachines will build enough machines to satisfy the machine pool / docker machine pool spec // eventually delete all the machine in excess, and update the status for all the machines. // @@ -126,9 +162,9 @@ func (np *NodePool) ReconcileMachines(ctx context.Context, remoteClient client.C // First remove instance status for machines no longer existing, then reconcile the existing machines. // NOTE: the status is the only source of truth for understanding if the machine is already bootstrapped, ready etc. // so we are preserving the existing status and using it as a bases for the next reconcile machine. - instances := make([]infraexpv1.DockerMachinePoolInstanceStatus, 0, len(np.machines)) - for i := range np.dockerMachinePool.Status.Instances { - instance := np.dockerMachinePool.Status.Instances[i] + instances := make([]NodePoolInstance, 0, len(np.machines)) + for i := range np.nodePoolInstances { + instance := np.nodePoolInstances[i] for j := range np.machines { if instance.InstanceName == np.machines[j].Name() { instances = append(instances, instance) @@ -136,7 +172,7 @@ func (np *NodePool) ReconcileMachines(ctx context.Context, remoteClient client.C } } } - np.dockerMachinePool.Status.Instances = instances + np.nodePoolInstances = instances result := ctrl.Result{} for i := range np.machines { @@ -164,6 +200,8 @@ func (np *NodePool) Delete(ctx context.Context) error { } } + // Note: We can set `np.nodePoolInstances = nil` here, but it shouldn't be necessary on Delete(). 
+ return nil } @@ -237,6 +275,9 @@ func (np *NodePool) refresh(ctx context.Context) error { np.machines = append(np.machines, machine) } } + + sort.Sort(np) + return nil } @@ -244,29 +285,28 @@ func (np *NodePool) refresh(ctx context.Context) error { func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machine, remoteClient client.Client) (ctrl.Result, error) { log := ctrl.LoggerFrom(ctx) - var machineStatus infraexpv1.DockerMachinePoolInstanceStatus + var nodePoolInstance NodePoolInstance isFound := false - for _, instanceStatus := range np.dockerMachinePool.Status.Instances { - if instanceStatus.InstanceName == machine.Name() { - machineStatus = instanceStatus + for _, instance := range np.nodePoolInstances { + if instance.InstanceName == machine.Name() { + nodePoolInstance = instance isFound = true } } if !isFound { log.Info("Creating instance record", "instance", machine.Name()) - machineStatus = infraexpv1.DockerMachinePoolInstanceStatus{ + nodePoolInstance = NodePoolInstance{ InstanceName: machine.Name(), - Version: np.machinePool.Spec.Template.Spec.Version, } - np.dockerMachinePool.Status.Instances = append(np.dockerMachinePool.Status.Instances, machineStatus) + np.nodePoolInstances = append(np.nodePoolInstances, nodePoolInstance) // return to surface the new machine exists. return ctrl.Result{Requeue: true}, nil } defer func() { - for i, instanceStatus := range np.dockerMachinePool.Status.Instances { - if instanceStatus.InstanceName == machine.Name() { - np.dockerMachinePool.Status.Instances[i] = machineStatus + for i, instance := range np.nodePoolInstances { + if instance.InstanceName == machine.Name() { + np.nodePoolInstances[i] = instance } } }() @@ -277,7 +317,7 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin } // if the machine isn't bootstrapped, only then run bootstrap scripts - if !machineStatus.Bootstrapped { + if !nodePoolInstance.Bootstrapped { timeoutCtx, cancel := context.WithTimeout(ctx, 3*time.Minute) defer cancel() @@ -305,13 +345,12 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin return ctrl.Result{}, errors.Wrap(err, "failed to check for existence of bootstrap success file at /run/cluster-api/bootstrap-success.complete") } } - machineStatus.Bootstrapped = true // return to surface the machine has been bootstrapped. 
 		return ctrl.Result{Requeue: true}, nil
 	}
 
-	if machineStatus.Addresses == nil {
+	if nodePoolInstance.Addresses == nil {
 		log.Info("Fetching instance addresses", "instance", machine.Name())
 		// set address in machine status
 		machineAddresses, err := externalMachine.Address(ctx)
@@ -321,14 +360,14 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin
 			return ctrl.Result{Requeue: true}, nil //nolint:nilerr
 		}
 
-		machineStatus.Addresses = []clusterv1.MachineAddress{
+		nodePoolInstance.Addresses = []clusterv1.MachineAddress{
 			{
 				Type:    clusterv1.MachineHostName,
 				Address: externalMachine.ContainerName(),
 			},
 		}
 		for _, addr := range machineAddresses {
-			machineStatus.Addresses = append(machineStatus.Addresses,
+			nodePoolInstance.Addresses = append(nodePoolInstance.Addresses,
 				clusterv1.MachineAddress{
 					Type:    clusterv1.MachineInternalIP,
 					Address: addr,
@@ -340,7 +379,7 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin
 		}
 	}
 
-	if machineStatus.ProviderID == nil {
+	if nodePoolInstance.ProviderID == nil {
 		log.Info("Fetching instance provider ID", "instance", machine.Name())
 		// Usually a cloud provider will do this, but there is no docker-cloud provider.
 		// Requeue if there is an error, as this is likely momentary load balancer
@@ -349,12 +388,8 @@ func (np *NodePool) reconcileMachine(ctx context.Context, machine *docker.Machin
 			log.V(4).Info("transient error setting the provider id")
 			return ctrl.Result{Requeue: true}, nil //nolint:nilerr
 		}
-		// Set ProviderID so the Cluster API Machine Controller can pull it
-		providerID := externalMachine.ProviderID()
-		machineStatus.ProviderID = &providerID
 	}
 
-	machineStatus.Ready = true
 	return ctrl.Result{}, nil
 }
diff --git a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go
index dd3e513148cd..750e8e2def99 100644
--- a/test/infrastructure/docker/internal/controllers/dockermachine_controller.go
+++ b/test/infrastructure/docker/internal/controllers/dockermachine_controller.go
@@ -149,8 +149,20 @@ func (r *DockerMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
 		return ctrl.Result{}, nil
 	}
 
+	if _, hasDeleteAnnotation := machine.Annotations[clusterv1.DeleteMachineAnnotation]; hasDeleteAnnotation {
+		if dockerMachine.Annotations == nil {
+			dockerMachine.Annotations = map[string]string{}
+		}
+		dockerMachine.Annotations[clusterv1.DeleteMachineAnnotation] = "true"
+	}
+
+	if dockerMachine.Spec.InstanceName == "" {
+		log.V(2).Info("InstanceName not set for DockerMachine, defaulting to owner machine name", "machine", machine.Name)
+		dockerMachine.Spec.InstanceName = machine.Name
+	}
+
 	// Create a helper for managing the docker container hosting the machine.
-	externalMachine, err := docker.NewMachine(ctx, cluster, machine.Name, nil)
+	externalMachine, err := docker.NewMachine(ctx, cluster, dockerMachine.Spec.InstanceName, nil)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrapf(err, "failed to create helper for managing the externalMachine")
 	}
@@ -213,12 +225,22 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster *
 		return ctrl.Result{}, nil
 	}
 
+	// Determine whether this DockerMachine belongs to a MachinePool by checking for the MachinePool name label
+	// (set alongside the selector labels when the DockerMachine is created).
+	_, machinePoolOwned := dockerMachine.Labels[clusterv1.MachinePoolNameLabel]
+
 	// if the machine is already provisioned, return
 	if dockerMachine.Spec.ProviderID != nil {
 		// ensure ready state is set.
// This is required after move, because status is not moved to the target cluster. dockerMachine.Status.Ready = true + // We only create a DockerMachinePoolMachine if the providerID is set, which implies that the bootstrap has succeeded. + // TODO: verify this logic + if machinePoolOwned { + log.V(2).Info("Marking bootstrap true for machine pool owned machine") + conditions.MarkTrue(dockerMachine, infrav1.BootstrapExecSucceededCondition) + } + if externalMachine.Exists() { conditions.MarkTrue(dockerMachine, infrav1.ContainerProvisionedCondition) // Setting machine address is required after move, because status.Address field is not retained during move. @@ -231,51 +253,52 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * return ctrl.Result{}, nil } - // Make sure bootstrap data is available and populated. - if machine.Spec.Bootstrap.DataSecretName == nil { - if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { - log.Info("Waiting for the control plane to be initialized") - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + if !machinePoolOwned { + // Make sure bootstrap data is available and populated. + if machine.Spec.Bootstrap.DataSecretName == nil { + if !util.IsControlPlaneMachine(machine) && !conditions.IsTrue(cluster, clusterv1.ControlPlaneInitializedCondition) { + log.Info("Waiting for the control plane to be initialized") + conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, clusterv1.WaitingForControlPlaneAvailableReason, clusterv1.ConditionSeverityInfo, "") + return ctrl.Result{}, nil + } + + log.Info("Waiting for the Bootstrap provider controller to set bootstrap data") + conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") return ctrl.Result{}, nil } - log.Info("Waiting for the Bootstrap provider controller to set bootstrap data") - conditions.MarkFalse(dockerMachine, infrav1.ContainerProvisionedCondition, infrav1.WaitingForBootstrapDataReason, clusterv1.ConditionSeverityInfo, "") - return ctrl.Result{}, nil - } - - // Create the docker container hosting the machine - role := constants.WorkerNodeRoleValue - if util.IsControlPlaneMachine(machine) { - role = constants.ControlPlaneNodeRoleValue - } + // Create the docker container hosting the machine + role := constants.WorkerNodeRoleValue + if util.IsControlPlaneMachine(machine) { + role = constants.ControlPlaneNodeRoleValue + } - // Create the machine if not existing yet - if !externalMachine.Exists() { - // NOTE: FailureDomains don't mean much in CAPD since it's all local, but we are setting a label on - // each container, so we can check placement. - if err := externalMachine.Create(ctx, dockerMachine.Spec.CustomImage, role, machine.Spec.Version, docker.FailureDomainLabel(machine.Spec.FailureDomain), dockerMachine.Spec.ExtraMounts); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to create worker DockerMachine") + // Create the machine if not existing yet + if !machinePoolOwned && !externalMachine.Exists() { + // NOTE: FailureDomains don't mean much in CAPD since it's all local, but we are setting a label on + // each container, so we can check placement. 
+ if err := externalMachine.Create(ctx, dockerMachine.Spec.CustomImage, role, machine.Spec.Version, docker.FailureDomainLabel(machine.Spec.FailureDomain), dockerMachine.Spec.ExtraMounts); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to create worker DockerMachine") + } } - } - // Preload images into the container - if len(dockerMachine.Spec.PreLoadImages) > 0 { - if err := externalMachine.PreloadLoadImages(ctx, dockerMachine.Spec.PreLoadImages); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to pre-load images into the DockerMachine") + // Preload images into the container + if len(dockerMachine.Spec.PreLoadImages) > 0 { + if err := externalMachine.PreloadLoadImages(ctx, dockerMachine.Spec.PreLoadImages); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to pre-load images into the DockerMachine") + } } - } - // if the machine is a control plane update the load balancer configuration - // we should only do this once, as reconfiguration more or less ensures - // node ref setting fails - if util.IsControlPlaneMachine(machine) && !dockerMachine.Status.LoadBalancerConfigured { - if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil { - return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration") + // if the machine is a control plane update the load balancer configuration + // we should only do this once, as reconfiguration more or less ensures + // node ref setting fails + if util.IsControlPlaneMachine(machine) && !dockerMachine.Status.LoadBalancerConfigured { + if err := externalLoadBalancer.UpdateConfiguration(ctx); err != nil { + return ctrl.Result{}, errors.Wrap(err, "failed to update DockerCluster.loadbalancer configuration") + } + dockerMachine.Status.LoadBalancerConfigured = true } - dockerMachine.Status.LoadBalancerConfigured = true } - // Update the ContainerProvisionedCondition condition // NOTE: it is required to create the patch helper before this change otherwise it wont surface if // we issue a patch down in the code (because if we create patch helper after this point the ContainerProvisionedCondition=True exists both on before and after). @@ -306,38 +329,41 @@ func (r *DockerMachineReconciler) reconcileNormal(ctx context.Context, cluster * // but bootstrapped is never set on the object. We only try to bootstrap if the machine // is not already bootstrapped. if err := externalMachine.CheckForBootstrapSuccess(timeoutCtx, false); err != nil { - bootstrapData, format, err := r.getBootstrapData(timeoutCtx, machine) - if err != nil { - return ctrl.Result{}, err - } + if !machinePoolOwned { + bootstrapData, format, err := r.getBootstrapData(timeoutCtx, machine) + if err != nil { + return ctrl.Result{}, err + } - // Setup a go routing to check for the machine being deleted while running bootstrap as a - // synchronous process, e.g. due to remediation. The routine stops when timeoutCtx is Done - // (either because canceled intentionally due to machine deletion or canceled by the defer cancel() - // call when exiting from this func). 
-		go func() {
-			for {
-				select {
-				case <-timeoutCtx.Done():
-					return
-				default:
-					updatedDockerMachine := &infrav1.DockerMachine{}
-					if err := r.Client.Get(ctx, client.ObjectKeyFromObject(dockerMachine), updatedDockerMachine); err == nil &&
-						!updatedDockerMachine.DeletionTimestamp.IsZero() {
-						log.Info("Cancelling Bootstrap because the underlying machine has been deleted")
-						cancel()
+			// Set up a goroutine to check for the machine being deleted while running bootstrap as a
+			// synchronous process, e.g. due to remediation. The routine stops when timeoutCtx is Done
+			// (either because canceled intentionally due to machine deletion or canceled by the defer cancel()
+			// call when exiting from this func).
+			go func() {
+				for {
+					select {
+					case <-timeoutCtx.Done():
 						return
+					default:
+						updatedDockerMachine := &infrav1.DockerMachine{}
+						if err := r.Client.Get(ctx, client.ObjectKeyFromObject(dockerMachine), updatedDockerMachine); err == nil &&
+							!updatedDockerMachine.DeletionTimestamp.IsZero() {
+							log.Info("Cancelling Bootstrap because the underlying machine has been deleted")
+							cancel()
+							return
+						}
+						time.Sleep(5 * time.Second)
 					}
-					time.Sleep(5 * time.Second)
 				}
-			}
-		}()
+			}()
 
-		// Run the bootstrap script. Simulates cloud-init/Ignition.
-		if err := externalMachine.ExecBootstrap(timeoutCtx, bootstrapData, format); err != nil {
-			conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
-			return ctrl.Result{}, errors.Wrap(err, "failed to exec DockerMachine bootstrap")
+			// Run the bootstrap script. Simulates cloud-init/Ignition.
+			if err := externalMachine.ExecBootstrap(timeoutCtx, bootstrapData, format); err != nil {
+				conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
+				return ctrl.Result{}, errors.Wrap(err, "failed to exec DockerMachine bootstrap")
+			}
 		}
+
 		// Check for bootstrap success
 		if err := externalMachine.CheckForBootstrapSuccess(timeoutCtx, true); err != nil {
 			conditions.MarkFalse(dockerMachine, infrav1.BootstrapExecSucceededCondition, infrav1.BootstrapFailedReason, clusterv1.ConditionSeverityWarning, "Repeating bootstrap")
@@ -430,7 +456,7 @@ func (r *DockerMachineReconciler) SetupWithManager(ctx context.Context, mgr ctrl
 	err = ctrl.NewControllerManagedBy(mgr).
 		For(&infrav1.DockerMachine{}).
 		WithOptions(options).
-		WithEventFilter(predicates.ResourceNotPausedAndHasFilterLabel(ctrl.LoggerFrom(ctx), r.WatchFilterValue)).
+		WithEventFilter(predicates.ResourceNotPaused(ctrl.LoggerFrom(ctx))).
 		Watches(
 			&clusterv1.Machine{},
 			handler.EnqueueRequestsFromMapFunc(util.MachineToInfrastructureMapFunc(infrav1.GroupVersion.WithKind("DockerMachine"))),
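Reviewer note (outside the patch): the new sort.Sort(np) call in NodePool.refresh() relies on the Len/Swap/Less methods added to NodePool, which order machines by instance name but push any instance whose owner Machine carries the DeleteMachineAnnotation (surfaced here as PrioritizeDelete) to the end of the slice, presumably so that trimming excess machines during scale-down picks the flagged ones first (the trimming code itself is outside these hunks). A standalone sketch of the comparator semantics; the instance and byDeletePriority names are illustrative, not the provider's actual types:

```go
package main

import (
	"fmt"
	"sort"
)

// instance mirrors the two fields of docker.NodePoolInstance that the
// comparator in nodepool.go actually consults.
type instance struct {
	InstanceName     string
	PrioritizeDelete bool
}

// byDeletePriority reproduces the Less semantics added to NodePool: when
// both instances share the same PrioritizeDelete flag they order by name;
// otherwise the instance flagged for deletion sorts after the one that is
// not, because Less returns the j-side flag.
type byDeletePriority []instance

func (s byDeletePriority) Len() int      { return len(s) }
func (s byDeletePriority) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byDeletePriority) Less(i, j int) bool {
	if s[i].PrioritizeDelete == s[j].PrioritizeDelete {
		return s[i].InstanceName < s[j].InstanceName
	}
	return s[j].PrioritizeDelete
}

func main() {
	pool := byDeletePriority{
		{InstanceName: "worker-2", PrioritizeDelete: true},
		{InstanceName: "worker-0"},
		{InstanceName: "worker-1"},
	}
	sort.Sort(pool)
	for _, in := range pool {
		fmt.Println(in.InstanceName, in.PrioritizeDelete)
	}
	// Output:
	// worker-0 false
	// worker-1 false
	// worker-2 true
}
```

Keeping the comparison keyed on a stable name order for unflagged instances also makes the reconcile loop deterministic across runs, which matters because the instances slice is rebuilt from the DockerMachine list on every reconcile rather than persisted in status.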