From ac763ba664e7598ba8dad75fe7f9e6c67c085399 Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Tue, 26 Mar 2024 17:13:07 +0100
Subject: [PATCH 01/10] Export ClusterClass controller Reconcile

---
 cmd/clusterctl/client/cluster/topology.go | 1 -
 controllers/alias.go | 22 ++++++++++++++-----
 .../clusterclass/clusterclass_controller.go | 3 +--
 .../controllers/clusterclass/suite_test.go | 1 -
 .../topology/cluster/suite_test.go | 1 -
 internal/webhooks/test/suite_test.go | 1 -
 main.go | 1 -
 7 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/cmd/clusterctl/client/cluster/topology.go b/cmd/clusterctl/client/cluster/topology.go
index 7c377c679f53..08e5d5d16471 100644
--- a/cmd/clusterctl/client/cluster/topology.go
+++ b/cmd/clusterctl/client/cluster/topology.go
@@ -516,7 +516,6 @@ func reconcileClusterClass(ctx context.Context, apiReader client.Reader, class c
 	clusterClassReconciler := &clusterclasscontroller.Reconciler{
 		Client:                    reconcilerClient,
-		APIReader:                 reconcilerClient,
 		UnstructuredCachingClient: reconcilerClient,
 	}
 
diff --git a/controllers/alias.go b/controllers/alias.go
index 7123686e5145..1bc4c7d90322 100644
--- a/controllers/alias.go
+++ b/controllers/alias.go
@@ -212,8 +212,11 @@ func (r *MachineSetTopologyReconciler) SetupWithManager(ctx context.Context, mgr
 
 // ClusterClassReconciler reconciles the ClusterClass object.
 type ClusterClassReconciler struct {
-	Client    client.Client
-	APIReader client.Reader
+	// internalReconciler stores the internal reconciler after SetupWithManager
+	// so that the exported Reconcile method can delegate to it.
+	internalReconciler *clusterclasscontroller.Reconciler
+
+	Client client.Client
 
 	// RuntimeClient is a client for calling runtime extensions.
 	RuntimeClient runtimeclient.Client
@@ -227,11 +230,20 @@ type ClusterClassReconciler struct {
 }
 
 func (r *ClusterClassReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
-	return (&clusterclasscontroller.Reconciler{
+	r.internalReconciler = &clusterclasscontroller.Reconciler{
 		Client:                    r.Client,
-		APIReader:                 r.APIReader,
 		RuntimeClient:             r.RuntimeClient,
 		UnstructuredCachingClient: r.UnstructuredCachingClient,
 		WatchFilterValue:          r.WatchFilterValue,
-	}).SetupWithManager(ctx, mgr, options)
+	}
+	return r.internalReconciler.SetupWithManager(ctx, mgr, options)
+}
+
+// Reconcile can be used to reconcile a ClusterClass.
+// Before it can be used, all fields of the ClusterClassReconciler must be set
+// and SetupWithManager must have been called.
+// This method is useful when testing the desired state computation of the
+// Cluster topology controller, because that requires a reconciled ClusterClass.
+func (r *ClusterClassReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+	return r.internalReconciler.Reconcile(ctx, req)
+}
diff --git a/internal/controllers/clusterclass/clusterclass_controller.go b/internal/controllers/clusterclass/clusterclass_controller.go
index 4d94939a47e1..e9412af070ee 100644
--- a/internal/controllers/clusterclass/clusterclass_controller.go
+++ b/internal/controllers/clusterclass/clusterclass_controller.go
@@ -58,8 +58,7 @@ import (
 
 // Reconciler reconciles the ClusterClass object.
 type Reconciler struct {
-	Client    client.Client
-	APIReader client.Reader
+	Client client.Client
 
 	// WatchFilterValue is the label value used to filter events prior to reconciliation.
WatchFilterValue string diff --git a/internal/controllers/clusterclass/suite_test.go b/internal/controllers/clusterclass/suite_test.go index 41f0dc2004a6..1880b4304726 100644 --- a/internal/controllers/clusterclass/suite_test.go +++ b/internal/controllers/clusterclass/suite_test.go @@ -73,7 +73,6 @@ func TestMain(m *testing.M) { } if err := (&Reconciler{ Client: mgr.GetClient(), - APIReader: mgr.GetAPIReader(), UnstructuredCachingClient: unstructuredCachingClient, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 5}); err != nil { panic(fmt.Sprintf("unable to create clusterclass reconciler: %v", err)) diff --git a/internal/controllers/topology/cluster/suite_test.go b/internal/controllers/topology/cluster/suite_test.go index 338094256ca4..211c24e81c46 100644 --- a/internal/controllers/topology/cluster/suite_test.go +++ b/internal/controllers/topology/cluster/suite_test.go @@ -111,7 +111,6 @@ func TestMain(m *testing.M) { } if err := (&clusterclass.Reconciler{ Client: mgr.GetClient(), - APIReader: mgr.GetAPIReader(), UnstructuredCachingClient: unstructuredCachingClient, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 5}); err != nil { panic(fmt.Sprintf("unable to create clusterclass reconciler: %v", err)) diff --git a/internal/webhooks/test/suite_test.go b/internal/webhooks/test/suite_test.go index 25fc42d150ef..4bb04e2f54dc 100644 --- a/internal/webhooks/test/suite_test.go +++ b/internal/webhooks/test/suite_test.go @@ -59,7 +59,6 @@ func TestMain(m *testing.M) { } if err := (&clusterclass.Reconciler{ Client: mgr.GetClient(), - APIReader: mgr.GetAPIReader(), UnstructuredCachingClient: unstructuredCachingClient, }).SetupWithManager(ctx, mgr, controller.Options{MaxConcurrentReconciles: 5}); err != nil { panic(fmt.Sprintf("unable to create clusterclass reconciler: %v", err)) diff --git a/main.go b/main.go index 4156c082a94d..9812e5d59905 100644 --- a/main.go +++ b/main.go @@ -434,7 +434,6 @@ func setupReconcilers(ctx context.Context, mgr ctrl.Manager) webhooks.ClusterCac if feature.Gates.Enabled(feature.ClusterTopology) { if err := (&controllers.ClusterClassReconciler{ Client: mgr.GetClient(), - APIReader: mgr.GetAPIReader(), RuntimeClient: runtimeClient, UnstructuredCachingClient: unstructuredCachingClient, WatchFilterValue: watchFilterValue, From b58108f38e25b2dd9e091536ec33586ce191d2d7 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 26 Mar 2024 17:24:55 +0100 Subject: [PATCH 02/10] Export BuiltinVariables and variable util funcs --- .../topologymutation_variable_types.go | 188 ++++++++++++ .../api/v1alpha1/zz_generated.deepcopy.go | 271 ++++++++++++++++++ exp/runtime/topologymutation/variables.go | 63 ++++ .../topologymutation/variables_test.go | 29 ++ exp/runtime/topologymutation/walker.go | 5 +- exp/runtime/topologymutation/walker_test.go | 9 +- .../patches/inline/json_patch_generator.go | 7 +- .../inline/json_patch_generator_test.go | 31 +- .../cluster/patches/variables/merge.go | 75 ----- .../cluster/patches/variables/merge_test.go | 51 ---- .../cluster/patches/variables/variables.go | 225 ++------------- .../patches/variables/variables_test.go | 44 +-- .../handlers/topologymutation/handler_test.go | 41 ++- 13 files changed, 641 insertions(+), 398 deletions(-) create mode 100644 exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go delete mode 100644 internal/controllers/topology/cluster/patches/variables/merge.go delete mode 100644 internal/controllers/topology/cluster/patches/variables/merge_test.go diff --git 
a/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go b/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go new file mode 100644 index 000000000000..caa2408cb9df --- /dev/null +++ b/exp/runtime/hooks/api/v1alpha1/topologymutation_variable_types.go @@ -0,0 +1,188 @@ +/* +Copyright 2024 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// BuiltinsName is the name of the builtin variable. +const BuiltinsName = "builtin" + +// Builtins represents builtin variables exposed through patches. +type Builtins struct { + Cluster *ClusterBuiltins `json:"cluster,omitempty"` + ControlPlane *ControlPlaneBuiltins `json:"controlPlane,omitempty"` + MachineDeployment *MachineDeploymentBuiltins `json:"machineDeployment,omitempty"` + MachinePool *MachinePoolBuiltins `json:"machinePool,omitempty"` +} + +// ClusterBuiltins represents builtin cluster variables. +type ClusterBuiltins struct { + // Name is the name of the cluster. + Name string `json:"name,omitempty"` + + // Namespace is the namespace of the cluster. + Namespace string `json:"namespace,omitempty"` + + // Topology represents the cluster topology variables. + Topology *ClusterTopologyBuiltins `json:"topology,omitempty"` + + // Network represents the cluster network variables. + Network *ClusterNetworkBuiltins `json:"network,omitempty"` +} + +// ClusterTopologyBuiltins represents builtin cluster topology variables. +type ClusterTopologyBuiltins struct { + // Version is the Kubernetes version of the Cluster. + // NOTE: Please note that this version might temporarily differ from the version + // of the ControlPlane or workers while an upgrade process is being orchestrated. + Version string `json:"version,omitempty"` + + // Class is the name of the ClusterClass of the Cluster. + Class string `json:"class,omitempty"` +} + +// ClusterNetworkBuiltins represents builtin cluster network variables. +type ClusterNetworkBuiltins struct { + // ServiceDomain is the domain name for services. + ServiceDomain *string `json:"serviceDomain,omitempty"` + // Services is the network ranges from which service VIPs are allocated. + Services []string `json:"services,omitempty"` + // Pods is the network ranges from which Pod networks are allocated. + Pods []string `json:"pods,omitempty"` + // IPFamily is the IPFamily the Cluster is operating in. One of Invalid, IPv4, IPv6, DualStack. + // Note: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. + // IPFamily may be dropped in a future release. More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521 + IPFamily string `json:"ipFamily,omitempty"` +} + +// ControlPlaneBuiltins represents builtin ControlPlane variables. +// NOTE: These variables are only set for templates belonging to the ControlPlane object. +type ControlPlaneBuiltins struct { + // Version is the Kubernetes version of the ControlPlane object. + // NOTE: Please note that this version is the version we are currently reconciling towards. 
+	// It can differ from the current version of the ControlPlane while an upgrade process is
+	// being orchestrated.
+	Version string `json:"version,omitempty"`
+
+	// Name is the name of the ControlPlane,
+	// to which the current template belongs.
+	Name string `json:"name,omitempty"`
+
+	// Replicas is the value of the replicas field of the ControlPlane object.
+	Replicas *int64 `json:"replicas,omitempty"`
+
+	// MachineTemplate is the value of the .spec.machineTemplate field of the ControlPlane object.
+	MachineTemplate *ControlPlaneMachineTemplateBuiltins `json:"machineTemplate,omitempty"`
+}
+
+// ControlPlaneMachineTemplateBuiltins is the value of the .spec.machineTemplate field of the ControlPlane object.
+type ControlPlaneMachineTemplateBuiltins struct {
+	// InfrastructureRef is the value of the infrastructureRef field of ControlPlane.spec.machineTemplate.
+	InfrastructureRef ControlPlaneMachineTemplateInfrastructureRefBuiltins `json:"infrastructureRef,omitempty"`
+}
+
+// ControlPlaneMachineTemplateInfrastructureRefBuiltins is the value of the infrastructureRef field of
+// ControlPlane.spec.machineTemplate.
+type ControlPlaneMachineTemplateInfrastructureRefBuiltins struct {
+	// Name of the infrastructureRef.
+	Name string `json:"name,omitempty"`
+}
+
+// MachineDeploymentBuiltins represents builtin MachineDeployment variables.
+// NOTE: These variables are only set for templates belonging to a MachineDeployment.
+type MachineDeploymentBuiltins struct {
+	// Version is the Kubernetes version of the MachineDeployment,
+	// to which the current template belongs.
+	// NOTE: Please note that this version is the version we are currently reconciling towards.
+	// It can differ from the current version of the MachineDeployment machines while an upgrade process is
+	// being orchestrated.
+	Version string `json:"version,omitempty"`
+
+	// Class is the class name of the MachineDeployment,
+	// to which the current template belongs.
+	Class string `json:"class,omitempty"`
+
+	// Name is the name of the MachineDeployment,
+	// to which the current template belongs.
+	Name string `json:"name,omitempty"`
+
+	// TopologyName is the topology name of the MachineDeployment,
+	// to which the current template belongs.
+	TopologyName string `json:"topologyName,omitempty"`
+
+	// Replicas is the value of the replicas field of the MachineDeployment,
+	// to which the current template belongs.
+	Replicas *int64 `json:"replicas,omitempty"`
+
+	// Bootstrap is the value of the .spec.template.spec.bootstrap field of the MachineDeployment.
+	Bootstrap *MachineBootstrapBuiltins `json:"bootstrap,omitempty"`
+
+	// InfrastructureRef is the value of the .spec.template.spec.infrastructureRef field of the MachineDeployment.
+	InfrastructureRef *MachineInfrastructureRefBuiltins `json:"infrastructureRef,omitempty"`
+}
+
+// MachinePoolBuiltins represents builtin MachinePool variables.
+// NOTE: These variables are only set for templates belonging to a MachinePool.
+type MachinePoolBuiltins struct {
+	// Version is the Kubernetes version of the MachinePool,
+	// to which the current template belongs.
+	// NOTE: Please note that this version is the version we are currently reconciling towards.
+	// It can differ from the current version of the MachinePool machines while an upgrade process is
+	// being orchestrated.
+	Version string `json:"version,omitempty"`
+
+	// Class is the class name of the MachinePool,
+	// to which the current template belongs.
+	Class string `json:"class,omitempty"`
+
+	// Name is the name of the MachinePool,
+	// to which the current template belongs.
+	Name string `json:"name,omitempty"`
+
+	// TopologyName is the topology name of the MachinePool,
+	// to which the current template belongs.
+	TopologyName string `json:"topologyName,omitempty"`
+
+	// Replicas is the value of the replicas field of the MachinePool,
+	// to which the current template belongs.
+	Replicas *int64 `json:"replicas,omitempty"`
+
+	// Bootstrap is the value of the .spec.template.spec.bootstrap field of the MachinePool.
+	Bootstrap *MachineBootstrapBuiltins `json:"bootstrap,omitempty"`
+
+	// InfrastructureRef is the value of the .spec.template.spec.infrastructureRef field of the MachinePool.
+	InfrastructureRef *MachineInfrastructureRefBuiltins `json:"infrastructureRef,omitempty"`
+}
+
+// MachineBootstrapBuiltins is the value of the .spec.template.spec.bootstrap field
+// of the MachineDeployment or MachinePool.
+type MachineBootstrapBuiltins struct {
+	// ConfigRef is the value of the .spec.template.spec.bootstrap.configRef field of the MachineDeployment or MachinePool.
+	ConfigRef *MachineBootstrapConfigRefBuiltins `json:"configRef,omitempty"`
+}
+
+// MachineBootstrapConfigRefBuiltins is the value of the .spec.template.spec.bootstrap.configRef
+// field of the MachineDeployment or MachinePool.
+type MachineBootstrapConfigRefBuiltins struct {
+	// Name of the bootstrap.configRef.
+	Name string `json:"name,omitempty"`
+}
+
+// MachineInfrastructureRefBuiltins is the value of the .spec.template.spec.infrastructureRef field
+// of the MachineDeployment or MachinePool.
+type MachineInfrastructureRefBuiltins struct {
+	// Name of the infrastructureRef.
+	Name string `json:"name,omitempty"`
+}
diff --git a/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go b/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go
index 9313d1a67aa2..f25c65e4d168 100644
--- a/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go
+++ b/exp/runtime/hooks/api/v1alpha1/zz_generated.deepcopy.go
@@ -331,6 +331,111 @@ func (in *BeforeClusterUpgradeResponse) DeepCopyObject() runtime.Object {
 	return nil
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Builtins) DeepCopyInto(out *Builtins) {
+	*out = *in
+	if in.Cluster != nil {
+		in, out := &in.Cluster, &out.Cluster
+		*out = new(ClusterBuiltins)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.ControlPlane != nil {
+		in, out := &in.ControlPlane, &out.ControlPlane
+		*out = new(ControlPlaneBuiltins)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MachineDeployment != nil {
+		in, out := &in.MachineDeployment, &out.MachineDeployment
+		*out = new(MachineDeploymentBuiltins)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MachinePool != nil {
+		in, out := &in.MachinePool, &out.MachinePool
+		*out = new(MachinePoolBuiltins)
+		(*in).DeepCopyInto(*out)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Builtins.
+func (in *Builtins) DeepCopy() *Builtins {
+	if in == nil {
+		return nil
+	}
+	out := new(Builtins)
+	in.DeepCopyInto(out)
+	return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ClusterBuiltins) DeepCopyInto(out *ClusterBuiltins) { + *out = *in + if in.Topology != nil { + in, out := &in.Topology, &out.Topology + *out = new(ClusterTopologyBuiltins) + **out = **in + } + if in.Network != nil { + in, out := &in.Network, &out.Network + *out = new(ClusterNetworkBuiltins) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterBuiltins. +func (in *ClusterBuiltins) DeepCopy() *ClusterBuiltins { + if in == nil { + return nil + } + out := new(ClusterBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterNetworkBuiltins) DeepCopyInto(out *ClusterNetworkBuiltins) { + *out = *in + if in.ServiceDomain != nil { + in, out := &in.ServiceDomain, &out.ServiceDomain + *out = new(string) + **out = **in + } + if in.Services != nil { + in, out := &in.Services, &out.Services + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterNetworkBuiltins. +func (in *ClusterNetworkBuiltins) DeepCopy() *ClusterNetworkBuiltins { + if in == nil { + return nil + } + out := new(ClusterNetworkBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterTopologyBuiltins) DeepCopyInto(out *ClusterTopologyBuiltins) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterTopologyBuiltins. +func (in *ClusterTopologyBuiltins) DeepCopy() *ClusterTopologyBuiltins { + if in == nil { + return nil + } + out := new(ClusterTopologyBuiltins) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CommonRequest) DeepCopyInto(out *CommonRequest) { *out = *in @@ -384,6 +489,62 @@ func (in *CommonRetryResponse) DeepCopy() *CommonRetryResponse { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneBuiltins) DeepCopyInto(out *ControlPlaneBuiltins) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int64) + **out = **in + } + if in.MachineTemplate != nil { + in, out := &in.MachineTemplate, &out.MachineTemplate + *out = new(ControlPlaneMachineTemplateBuiltins) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneBuiltins. +func (in *ControlPlaneBuiltins) DeepCopy() *ControlPlaneBuiltins { + if in == nil { + return nil + } + out := new(ControlPlaneBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineTemplateBuiltins) DeepCopyInto(out *ControlPlaneMachineTemplateBuiltins) { + *out = *in + out.InfrastructureRef = in.InfrastructureRef +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineTemplateBuiltins. 
+func (in *ControlPlaneMachineTemplateBuiltins) DeepCopy() *ControlPlaneMachineTemplateBuiltins { + if in == nil { + return nil + } + out := new(ControlPlaneMachineTemplateBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControlPlaneMachineTemplateInfrastructureRefBuiltins) DeepCopyInto(out *ControlPlaneMachineTemplateInfrastructureRefBuiltins) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControlPlaneMachineTemplateInfrastructureRefBuiltins. +func (in *ControlPlaneMachineTemplateInfrastructureRefBuiltins) DeepCopy() *ControlPlaneMachineTemplateInfrastructureRefBuiltins { + if in == nil { + return nil + } + out := new(ControlPlaneMachineTemplateInfrastructureRefBuiltins) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DiscoverVariablesRequest) DeepCopyInto(out *DiscoverVariablesRequest) { *out = *in @@ -668,6 +829,116 @@ func (in *HolderReference) DeepCopy() *HolderReference { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineBootstrapBuiltins) DeepCopyInto(out *MachineBootstrapBuiltins) { + *out = *in + if in.ConfigRef != nil { + in, out := &in.ConfigRef, &out.ConfigRef + *out = new(MachineBootstrapConfigRefBuiltins) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineBootstrapBuiltins. +func (in *MachineBootstrapBuiltins) DeepCopy() *MachineBootstrapBuiltins { + if in == nil { + return nil + } + out := new(MachineBootstrapBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineBootstrapConfigRefBuiltins) DeepCopyInto(out *MachineBootstrapConfigRefBuiltins) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineBootstrapConfigRefBuiltins. +func (in *MachineBootstrapConfigRefBuiltins) DeepCopy() *MachineBootstrapConfigRefBuiltins { + if in == nil { + return nil + } + out := new(MachineBootstrapConfigRefBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachineDeploymentBuiltins) DeepCopyInto(out *MachineDeploymentBuiltins) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int64) + **out = **in + } + if in.Bootstrap != nil { + in, out := &in.Bootstrap, &out.Bootstrap + *out = new(MachineBootstrapBuiltins) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureRef != nil { + in, out := &in.InfrastructureRef, &out.InfrastructureRef + *out = new(MachineInfrastructureRefBuiltins) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineDeploymentBuiltins. +func (in *MachineDeploymentBuiltins) DeepCopy() *MachineDeploymentBuiltins { + if in == nil { + return nil + } + out := new(MachineDeploymentBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MachineInfrastructureRefBuiltins) DeepCopyInto(out *MachineInfrastructureRefBuiltins) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachineInfrastructureRefBuiltins. +func (in *MachineInfrastructureRefBuiltins) DeepCopy() *MachineInfrastructureRefBuiltins { + if in == nil { + return nil + } + out := new(MachineInfrastructureRefBuiltins) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MachinePoolBuiltins) DeepCopyInto(out *MachinePoolBuiltins) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int64) + **out = **in + } + if in.Bootstrap != nil { + in, out := &in.Bootstrap, &out.Bootstrap + *out = new(MachineBootstrapBuiltins) + (*in).DeepCopyInto(*out) + } + if in.InfrastructureRef != nil { + in, out := &in.InfrastructureRef, &out.InfrastructureRef + *out = new(MachineInfrastructureRefBuiltins) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MachinePoolBuiltins. +func (in *MachinePoolBuiltins) DeepCopy() *MachinePoolBuiltins { + if in == nil { + return nil + } + out := new(MachinePoolBuiltins) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ValidateTopologyRequest) DeepCopyInto(out *ValidateTopologyRequest) { *out = *in diff --git a/exp/runtime/topologymutation/variables.go b/exp/runtime/topologymutation/variables.go index 1b7941217a84..7d34094e35e9 100644 --- a/exp/runtime/topologymutation/variables.go +++ b/exp/runtime/topologymutation/variables.go @@ -24,6 +24,7 @@ import ( "github.com/pkg/errors" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" patchvariables "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" ) @@ -87,3 +88,65 @@ func sanitizeJSON(input []byte) (output []byte) { output = []byte(strings.ReplaceAll(string(input), "\\", "")) return output } + +// ToMap converts a list of Variables to a map of apiextensionsv1.JSON (name is the map key). +// This is usually used to convert the Variables in a GeneratePatchesRequestItem into a format +// that is used by MergeVariableMaps. +func ToMap(variables []runtimehooksv1.Variable) map[string]apiextensionsv1.JSON { + variablesMap := map[string]apiextensionsv1.JSON{} + for i := range variables { + variablesMap[variables[i].Name] = variables[i].Value + } + return variablesMap +} + +// MergeVariableMaps merges variables. +// This func is useful when merging global and template-specific variables. +// NOTE: In case a variable exists in multiple maps, the variable from the latter map is preserved. +// NOTE: The builtin variable object is merged instead of simply overwritten. +func MergeVariableMaps(variableMaps ...map[string]apiextensionsv1.JSON) (map[string]apiextensionsv1.JSON, error) { + res := make(map[string]apiextensionsv1.JSON) + + for _, variableMap := range variableMaps { + for variableName, variableValue := range variableMap { + // If the variable already exists and is the builtin variable, merge it. 
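+			// For example (illustrative values, mirroring TestMergeVariables below):
+			// merging an existing builtin value
+			//   {"cluster":{"name":"cluster-name"}}
+			// with an incoming builtin value
+			//   {"controlPlane":{"replicas":3}}
+			// yields
+			//   {"cluster":{"name":"cluster-name"},"controlPlane":{"replicas":3}},
+			// while any field set in both values is taken from the incoming one.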
+			if _, ok := res[variableName]; ok && variableName == runtimehooksv1.BuiltinsName {
+				mergedV, err := mergeBuiltinVariables(res[variableName], variableValue)
+				if err != nil {
+					return nil, errors.Wrapf(err, "failed to merge builtin variables")
+				}
+				res[variableName] = *mergedV
+				continue
+			}
+			res[variableName] = variableValue
+		}
+	}
+
+	return res, nil
+}
+
+// mergeBuiltinVariables merges builtin variable objects.
+// NOTE: If a field is set in multiple builtin variables, the value from the latter variable is preserved.
+func mergeBuiltinVariables(variableList ...apiextensionsv1.JSON) (*apiextensionsv1.JSON, error) {
+	builtins := &runtimehooksv1.Builtins{}
+
+	// Unmarshal all variables into builtins.
+	// NOTE: This accumulates the fields on the builtins.
+	// Fields will be overwritten by later Unmarshals if fields are
+	// set on multiple variables.
+	for _, variable := range variableList {
+		if err := json.Unmarshal(variable.Raw, builtins); err != nil {
+			return nil, errors.Wrapf(err, "failed to unmarshal builtin variable")
+		}
+	}
+
+	// Marshal builtins to JSON.
+	builtinVariableJSON, err := json.Marshal(builtins)
+	if err != nil {
+		return nil, errors.Wrapf(err, "failed to marshal builtin variable")
+	}
+
+	return &apiextensionsv1.JSON{
+		Raw: builtinVariableJSON,
+	}, nil
+}
diff --git a/exp/runtime/topologymutation/variables_test.go b/exp/runtime/topologymutation/variables_test.go
index eb1920258569..7550f713fab9 100644
--- a/exp/runtime/topologymutation/variables_test.go
+++ b/exp/runtime/topologymutation/variables_test.go
@@ -22,6 +22,8 @@ import (
 	. "github.com/onsi/gomega"
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/utils/ptr"
+
+	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 )
 
 func Test_GetRawTemplateVariable(t *testing.T) {
@@ -280,3 +282,30 @@ func Test_GetVariableObjectWithNestedType(t *testing.T) {
 		})
 	}
 }
+
+func TestMergeVariables(t *testing.T) {
+	t.Run("Merge variables", func(t *testing.T) {
+		g := NewWithT(t)
+
+		m, err := MergeVariableMaps(
+			map[string]apiextensionsv1.JSON{
+				runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)},
+				"a":                         {Raw: []byte("a-different")},
+				"c":                         {Raw: []byte("c")},
+			},
+			map[string]apiextensionsv1.JSON{
+				// Verify that builtin variables are merged correctly and
+				// the latter variables take precedence ("cluster-name-overwrite").
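+				// Non-builtin variables such as "a" are overwritten wholesale rather
+				// than merged, so only the latter value of "a" is expected below.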
+ runtimehooksv1.BuiltinsName: {Raw: []byte(`{"controlPlane":{"replicas":3},"cluster":{"name":"cluster-name-overwrite"}}`)}, + "a": {Raw: []byte("a")}, + "b": {Raw: []byte("b")}, + }, + ) + g.Expect(err).ToNot(HaveOccurred()) + + g.Expect(m).To(HaveKeyWithValue(runtimehooksv1.BuiltinsName, apiextensionsv1.JSON{Raw: []byte(`{"cluster":{"name":"cluster-name-overwrite","namespace":"default","topology":{"version":"v1.21.1","class":"clusterClass1"}},"controlPlane":{"replicas":3}}`)})) + g.Expect(m).To(HaveKeyWithValue("a", apiextensionsv1.JSON{Raw: []byte("a")})) + g.Expect(m).To(HaveKeyWithValue("b", apiextensionsv1.JSON{Raw: []byte("b")})) + g.Expect(m).To(HaveKeyWithValue("c", apiextensionsv1.JSON{Raw: []byte("c")})) + }) +} diff --git a/exp/runtime/topologymutation/walker.go b/exp/runtime/topologymutation/walker.go index fd6357f0d80b..b188ccf49566 100644 --- a/exp/runtime/topologymutation/walker.go +++ b/exp/runtime/topologymutation/walker.go @@ -29,7 +29,6 @@ import ( ctrl "sigs.k8s.io/controller-runtime" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - patchvariables "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" ) // WalkTemplatesOption is some configuration that modifies WalkTemplates behavior. @@ -82,7 +81,7 @@ func WalkTemplates(ctx context.Context, decoder runtime.Decoder, req *runtimehoo resp *runtimehooksv1.GeneratePatchesResponse, mutateFunc func(ctx context.Context, obj runtime.Object, variables map[string]apiextensionsv1.JSON, holderRef runtimehooksv1.HolderReference) error, opts ...WalkTemplatesOption) { log := ctrl.LoggerFrom(ctx) - globalVariables := patchvariables.ToMap(req.Variables) + globalVariables := ToMap(req.Variables) options := newWalkTemplatesOptions() for _, o := range opts { @@ -93,7 +92,7 @@ func WalkTemplates(ctx context.Context, decoder runtime.Decoder, req *runtimehoo // TODO: add a notion of ordering the patch implementers can rely on. Ideally ordering could be pluggable via options. for _, requestItem := range req.Items { // Computes the variables that apply to the template, by merging global and template variables. 
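+		// Template-scoped variables take precedence over global variables with the
+		// same name; only the "builtin" object is merged field by field instead of
+		// being overwritten (see MergeVariableMaps).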
- templateVariables, err := patchvariables.MergeVariableMaps(globalVariables, patchvariables.ToMap(requestItem.Variables)) + templateVariables, err := MergeVariableMaps(globalVariables, ToMap(requestItem.Variables)) if err != nil { resp.Status = runtimehooksv1.ResponseStatusFailure resp.Message = err.Error() diff --git a/exp/runtime/topologymutation/walker_test.go b/exp/runtime/topologymutation/walker_test.go index 996ed03ef3e2..58370a30492e 100644 --- a/exp/runtime/topologymutation/walker_test.go +++ b/exp/runtime/topologymutation/walker_test.go @@ -32,7 +32,6 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" ) var ( @@ -82,21 +81,21 @@ func Test_WalkTemplates(t *testing.T) { { name: "Fails for invalid builtin variables", globalVariables: []runtimehooksv1.Variable{ - newVariable(variables.BuiltinsName, variables.Builtins{ - Cluster: &variables.ClusterBuiltins{ + newVariable(runtimehooksv1.BuiltinsName, runtimehooksv1.Builtins{ + Cluster: &runtimehooksv1.ClusterBuiltins{ Name: "test", }, }), }, requestItems: []runtimehooksv1.GeneratePatchesRequestItem{ requestItem("1", kubeadmControlPlaneTemplate, []runtimehooksv1.Variable{ - newVariable(variables.BuiltinsName, "{invalid-builtin-value}"), + newVariable(runtimehooksv1.BuiltinsName, "{invalid-builtin-value}"), }), }, expectedResponse: &runtimehooksv1.GeneratePatchesResponse{ CommonResponse: runtimehooksv1.CommonResponse{ Status: runtimehooksv1.ResponseStatusFailure, - Message: "failed to merge builtin variables: failed to unmarshal builtin variable: json: cannot unmarshal string into Go value of type variables.Builtins", + Message: "failed to merge builtin variables: failed to unmarshal builtin variable: json: cannot unmarshal string into Go value of type v1alpha1.Builtins", }, }, }, diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go index 1c80acb634a7..fe09c91100de 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator.go @@ -34,6 +34,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/runtime/topologymutation" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/api" patchvariables "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" @@ -55,7 +56,7 @@ func NewGenerator(patch *clusterv1.ClusterClassPatch) api.Generator { func (j *jsonPatchGenerator) Generate(_ context.Context, _ client.Object, req *runtimehooksv1.GeneratePatchesRequest) (*runtimehooksv1.GeneratePatchesResponse, error) { resp := &runtimehooksv1.GeneratePatchesResponse{} - globalVariables := patchvariables.ToMap(req.Variables) + globalVariables := topologymutation.ToMap(req.Variables) // Loop over all templates. 
errs := []error{} @@ -63,7 +64,7 @@ func (j *jsonPatchGenerator) Generate(_ context.Context, _ client.Object, req *r item := &req.Items[i] objectKind := item.Object.Object.GetObjectKind().GroupVersionKind().Kind - templateVariables := patchvariables.ToMap(item.Variables) + templateVariables := topologymutation.ToMap(item.Variables) // Calculate the list of patches which match the current template. matchingPatches := []clusterv1.PatchDefinition{} @@ -80,7 +81,7 @@ func (j *jsonPatchGenerator) Generate(_ context.Context, _ client.Object, req *r } // Merge template-specific and global variables. - variables, err := patchvariables.MergeVariableMaps(globalVariables, templateVariables) + variables, err := topologymutation.MergeVariableMaps(globalVariables, templateVariables) if err != nil { errs = append(errs, errors.Wrapf(err, "failed to merge global and template-specific variables for %q", objectKind)) continue diff --git a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go index 39e8b5e6dfa9..3fc2fcf5ca04 100644 --- a/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go +++ b/internal/controllers/topology/cluster/patches/inline/json_patch_generator_test.go @@ -31,7 +31,6 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - patchvariables "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" ) func TestGenerate(t *testing.T) { @@ -1525,7 +1524,7 @@ func TestCalculateValue(t *testing.T) { }, }, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"controlPlane":{"replicas":3}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"controlPlane":{"replicas":3}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`3`)}, }, @@ -1537,7 +1536,7 @@ func TestCalculateValue(t *testing.T) { }, }, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`"v1.21.1"`)}, }, @@ -1549,7 +1548,7 @@ func TestCalculateValue(t *testing.T) { }, }, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, @@ -1561,7 +1560,7 @@ func TestCalculateValue(t *testing.T) { }, }, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, want: &apiextensionsv1.JSON{Raw: 
[]byte(`{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}`)}, }, @@ -1573,7 +1572,7 @@ func TestCalculateValue(t *testing.T) { }, }, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`{"class":"clusterClass1","version":"v1.21.1"}`)}, }, @@ -1941,7 +1940,7 @@ func TestRenderValueTemplate(t *testing.T) { name: "Should render depending on variable existence: variable is set", template: `{{ if .vnetName }}{{.vnetName}}{{else}}{{.builtin.cluster.name}}-vnet{{end}}`, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, "vnetName": {Raw: []byte(`"custom-network"`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`"custom-network"`)}, @@ -1950,7 +1949,7 @@ func TestRenderValueTemplate(t *testing.T) { name: "Should render depending on variable existence: variable is not set", template: `{{ if .vnetName }}{{.vnetName}}{{else}}{{.builtin.cluster.name}}-vnet{{end}}`, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`"cluster1-vnet"`)}, }, @@ -1965,7 +1964,7 @@ func TestRenderValueTemplate(t *testing.T) { owner: root:root `, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(` [{ @@ -1989,7 +1988,7 @@ contentFrom: owner: root:root `, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(` { @@ -2017,7 +2016,7 @@ owner: root:root "owner":"root:root" }]`, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(` [{ @@ -2044,7 +2043,7 @@ owner: root:root "owner":"root:root" }`, variables: map[string]apiextensionsv1.JSON{ - patchvariables.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster1"}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(` { @@ -2166,7 +2165,7 @@ owner: root:root } }`)}, // Schema must either support complex objects with predefined keys/mdClasses or maps with additionalProperties. - patchvariables.BuiltinsName: {Raw: []byte(`{ + runtimehooksv1.BuiltinsName: {Raw: []byte(`{ "machineDeployment":{ "version":"v1.21.1", "class":"mdClass2", @@ -2191,7 +2190,7 @@ owner: root:root } }`)}, // Schema must either support complex objects with predefined keys/mdClasses or maps with additionalProperties. 
- patchvariables.BuiltinsName: {Raw: []byte(`{ + runtimehooksv1.BuiltinsName: {Raw: []byte(`{ "machinePool":{ "version":"v1.21.1", "class":"mpClass2", @@ -2216,7 +2215,7 @@ owner: root:root } }`)}, // Schema must either support complex objects with predefined keys/mdClasses or maps with additionalProperties. - patchvariables.BuiltinsName: {Raw: []byte(`{"machineDeployment":{"version":"v1.21.1","class":"mdClass2","name":"md1","topologyName":"md-topology","replicas":3}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"machineDeployment":{"version":"v1.21.1","class":"mdClass2","name":"md1","topologyName":"md-topology","replicas":3}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`"configValue2"`)}, }, @@ -2233,7 +2232,7 @@ owner: root:root } }`)}, // Schema must either support complex objects with predefined keys/mpClasses or maps with additionalProperties. - patchvariables.BuiltinsName: {Raw: []byte(`{"machinePool":{"version":"v1.21.1","class":"mpClass2","name":"mp1","topologyName":"mp-topology","replicas":3}}`)}, + runtimehooksv1.BuiltinsName: {Raw: []byte(`{"machinePool":{"version":"v1.21.1","class":"mpClass2","name":"mp1","topologyName":"mp-topology","replicas":3}}`)}, }, want: &apiextensionsv1.JSON{Raw: []byte(`"configValue2"`)}, }, diff --git a/internal/controllers/topology/cluster/patches/variables/merge.go b/internal/controllers/topology/cluster/patches/variables/merge.go deleted file mode 100644 index 72afb411c4fa..000000000000 --- a/internal/controllers/topology/cluster/patches/variables/merge.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package variables - -import ( - "encoding/json" - - "github.com/pkg/errors" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" -) - -// MergeVariableMaps merges variables. -// This func is useful when merging global and template-specific variables. -// NOTE: In case a variable exists in multiple maps, the variable from the latter map is preserved. -// NOTE: The builtin variable object is merged instead of simply overwritten. -func MergeVariableMaps(variableMaps ...map[string]apiextensionsv1.JSON) (map[string]apiextensionsv1.JSON, error) { - res := make(map[string]apiextensionsv1.JSON) - - for _, variableMap := range variableMaps { - for variableName, variableValue := range variableMap { - // If the variable already exists and is the builtin variable, merge it. - if _, ok := res[variableName]; ok && variableName == BuiltinsName { - mergedV, err := mergeBuiltinVariables(res[variableName], variableValue) - if err != nil { - return nil, errors.Wrapf(err, "failed to merge builtin variables") - } - res[variableName] = *mergedV - continue - } - res[variableName] = variableValue - } - } - - return res, nil -} - -// mergeBuiltinVariables merges builtin variable objects. -// NOTE: In case a variable exists in multiple builtin variables, the variable from the latter map is preserved. 
-func mergeBuiltinVariables(variableList ...apiextensionsv1.JSON) (*apiextensionsv1.JSON, error) { - builtins := &Builtins{} - - // Unmarshal all variables into builtins. - // NOTE: This accumulates the fields on the builtins. - // Fields will be overwritten by later Unmarshals if fields are - // set on multiple variables. - for _, variable := range variableList { - if err := json.Unmarshal(variable.Raw, builtins); err != nil { - return nil, errors.Wrapf(err, "failed to unmarshal builtin variable") - } - } - - // Marshal builtins to JSON. - builtinVariableJSON, err := json.Marshal(builtins) - if err != nil { - return nil, errors.Wrapf(err, "failed to marshal builtin variable") - } - - return &apiextensionsv1.JSON{ - Raw: builtinVariableJSON, - }, nil -} diff --git a/internal/controllers/topology/cluster/patches/variables/merge_test.go b/internal/controllers/topology/cluster/patches/variables/merge_test.go deleted file mode 100644 index 3af6e5ed5dc6..000000000000 --- a/internal/controllers/topology/cluster/patches/variables/merge_test.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package variables - -import ( - "testing" - - . "github.com/onsi/gomega" - apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" -) - -func TestMergeVariables(t *testing.T) { - t.Run("Merge variables", func(t *testing.T) { - g := NewWithT(t) - - m, err := MergeVariableMaps( - map[string]apiextensionsv1.JSON{ - BuiltinsName: {Raw: []byte(`{"cluster":{"name":"cluster-name","namespace":"default","topology":{"class":"clusterClass1","version":"v1.21.1"}}}`)}, - "a": {Raw: []byte("a-different")}, - "c": {Raw: []byte("c")}, - }, - map[string]apiextensionsv1.JSON{ - // Verify that builtin variables are merged correctly and - // the latter variables take precedent ("cluster-name-overwrite"). - BuiltinsName: {Raw: []byte(`{"controlPlane":{"replicas":3},"cluster":{"name":"cluster-name-overwrite"}}`)}, - "a": {Raw: []byte("a")}, - "b": {Raw: []byte("b")}, - }, - ) - g.Expect(err).ToNot(HaveOccurred()) - - g.Expect(m).To(HaveKeyWithValue(BuiltinsName, apiextensionsv1.JSON{Raw: []byte(`{"cluster":{"name":"cluster-name-overwrite","namespace":"default","topology":{"version":"v1.21.1","class":"clusterClass1"}},"controlPlane":{"replicas":3}}`)})) - g.Expect(m).To(HaveKeyWithValue("a", apiextensionsv1.JSON{Raw: []byte("a")})) - g.Expect(m).To(HaveKeyWithValue("b", apiextensionsv1.JSON{Raw: []byte("b")})) - g.Expect(m).To(HaveKeyWithValue("c", apiextensionsv1.JSON{Raw: []byte("c")})) - }) -} diff --git a/internal/controllers/topology/cluster/patches/variables/variables.go b/internal/controllers/topology/cluster/patches/variables/variables.go index dd7ac0b7286c..9887f0739ef5 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables.go +++ b/internal/controllers/topology/cluster/patches/variables/variables.go @@ -32,180 +32,10 @@ import ( ) const ( - // BuiltinsName is the name of the builtin variable. 
- BuiltinsName = "builtin" // emptyDefinitionFrom may be supplied in variable values. emptyDefinitionFrom = "" ) -// Builtins represents builtin variables exposed through patches. -type Builtins struct { - Cluster *ClusterBuiltins `json:"cluster,omitempty"` - ControlPlane *ControlPlaneBuiltins `json:"controlPlane,omitempty"` - MachineDeployment *MachineDeploymentBuiltins `json:"machineDeployment,omitempty"` - MachinePool *MachinePoolBuiltins `json:"machinePool,omitempty"` -} - -// ClusterBuiltins represents builtin cluster variables. -type ClusterBuiltins struct { - // Name is the name of the cluster. - Name string `json:"name,omitempty"` - - // Namespace is the namespace of the cluster. - Namespace string `json:"namespace,omitempty"` - - // Topology represents the cluster topology variables. - Topology *ClusterTopologyBuiltins `json:"topology,omitempty"` - - // Network represents the cluster network variables. - Network *ClusterNetworkBuiltins `json:"network,omitempty"` -} - -// ClusterTopologyBuiltins represents builtin cluster topology variables. -type ClusterTopologyBuiltins struct { - // Version is the Kubernetes version of the Cluster. - // NOTE: Please note that this version might temporarily differ from the version - // of the ControlPlane or workers while an upgrade process is being orchestrated. - Version string `json:"version,omitempty"` - - // Class is the name of the ClusterClass of the Cluster. - Class string `json:"class,omitempty"` -} - -// ClusterNetworkBuiltins represents builtin cluster network variables. -type ClusterNetworkBuiltins struct { - // ServiceDomain is the domain name for services. - ServiceDomain *string `json:"serviceDomain,omitempty"` - // Services is the network ranges from which service VIPs are allocated. - Services []string `json:"services,omitempty"` - // Pods is the network ranges from which Pod networks are allocated. - Pods []string `json:"pods,omitempty"` - // IPFamily is the IPFamily the Cluster is operating in. One of Invalid, IPv4, IPv6, DualStack. - // Note: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. - // IPFamily may be dropped in a future release. More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521 - IPFamily string `json:"ipFamily,omitempty"` -} - -// ControlPlaneBuiltins represents builtin ControlPlane variables. -// NOTE: These variables are only set for templates belonging to the ControlPlane object. -type ControlPlaneBuiltins struct { - // Version is the Kubernetes version of the ControlPlane object. - // NOTE: Please note that this version is the version we are currently reconciling towards. - // It can differ from the current version of the ControlPlane while an upgrade process is - // being orchestrated. - Version string `json:"version,omitempty"` - - // Name is the name of the ControlPlane, - // to which the current template belongs to. - Name string `json:"name,omitempty"` - - // Replicas is the value of the replicas field of the ControlPlane object. - Replicas *int64 `json:"replicas,omitempty"` - - // MachineTemplate is the value of the .spec.machineTemplate field of the ControlPlane object. - MachineTemplate *ControlPlaneMachineTemplateBuiltins `json:"machineTemplate,omitempty"` -} - -// ControlPlaneMachineTemplateBuiltins is the value of the .spec.machineTemplate field of the ControlPlane object. -type ControlPlaneMachineTemplateBuiltins struct { - // InfrastructureRef is the value of the infrastructureRef field of ControlPlane.spec.machineTemplate. 
- InfrastructureRef ControlPlaneMachineTemplateInfrastructureRefBuiltins `json:"infrastructureRef,omitempty"` -} - -// ControlPlaneMachineTemplateInfrastructureRefBuiltins is the value of the infrastructureRef field of -// ControlPlane.spec.machineTemplate. -type ControlPlaneMachineTemplateInfrastructureRefBuiltins struct { - // Name of the infrastructureRef. - Name string `json:"name,omitempty"` -} - -// MachineDeploymentBuiltins represents builtin MachineDeployment variables. -// NOTE: These variables are only set for templates belonging to a MachineDeployment. -type MachineDeploymentBuiltins struct { - // Version is the Kubernetes version of the MachineDeployment, - // to which the current template belongs to. - // NOTE: Please note that this version is the version we are currently reconciling towards. - // It can differ from the current version of the MachineDeployment machines while an upgrade process is - // being orchestrated. - Version string `json:"version,omitempty"` - - // Class is the class name of the MachineDeployment, - // to which the current template belongs to. - Class string `json:"class,omitempty"` - - // Name is the name of the MachineDeployment, - // to which the current template belongs to. - Name string `json:"name,omitempty"` - - // TopologyName is the topology name of the MachineDeployment, - // to which the current template belongs to. - TopologyName string `json:"topologyName,omitempty"` - - // Replicas is the value of the replicas field of the MachineDeployment, - // to which the current template belongs to. - Replicas *int64 `json:"replicas,omitempty"` - - // Bootstrap is the value of the .spec.template.spec.bootstrap field of the MachineDeployment. - Bootstrap *MachineBootstrapBuiltins `json:"bootstrap,omitempty"` - - // InfrastructureRef is the value of the .spec.template.spec.infrastructureRef field of the MachineDeployment. - InfrastructureRef *MachineInfrastructureRefBuiltins `json:"infrastructureRef,omitempty"` -} - -// MachinePoolBuiltins represents builtin MachinePool variables. -// NOTE: These variables are only set for templates belonging to a MachinePool. -type MachinePoolBuiltins struct { - // Version is the Kubernetes version of the MachinePool, - // to which the current template belongs to. - // NOTE: Please note that this version is the version we are currently reconciling towards. - // It can differ from the current version of the MachinePool machines while an upgrade process is - // being orchestrated. - Version string `json:"version,omitempty"` - - // Class is the class name of the MachinePool, - // to which the current template belongs to. - Class string `json:"class,omitempty"` - - // Name is the name of the MachinePool, - // to which the current template belongs to. - Name string `json:"name,omitempty"` - - // TopologyName is the topology name of the MachinePool, - // to which the current template belongs to. - TopologyName string `json:"topologyName,omitempty"` - - // Replicas is the value of the replicas field of the MachinePool, - // to which the current template belongs to. - Replicas *int64 `json:"replicas,omitempty"` - - // Bootstrap is the value of the .spec.template.spec.bootstrap field of the MachinePool. - Bootstrap *MachineBootstrapBuiltins `json:"bootstrap,omitempty"` - - // InfrastructureRef is the value of the .spec.template.spec.infrastructureRef field of the MachinePool. 
- InfrastructureRef *MachineInfrastructureRefBuiltins `json:"infrastructureRef,omitempty"` -} - -// MachineBootstrapBuiltins is the value of the .spec.template.spec.bootstrap field -// of the MachineDeployment or MachinePool. -type MachineBootstrapBuiltins struct { - // ConfigRef is the value of the .spec.template.spec.bootstrap.configRef field of the MachineDeployment. - ConfigRef *MachineBootstrapConfigRefBuiltins `json:"configRef,omitempty"` -} - -// MachineBootstrapConfigRefBuiltins is the value of the .spec.template.spec.bootstrap.configRef -// field of the MachineDeployment or MachinePool. -type MachineBootstrapConfigRefBuiltins struct { - // Name of the bootstrap.configRef. - Name string `json:"name,omitempty"` -} - -// MachineInfrastructureRefBuiltins is the value of the .spec.template.spec.infrastructureRef field -// of the MachineDeployment or MachinePool. -type MachineInfrastructureRefBuiltins struct { - // Name of the infrastructureRef. - Name string `json:"name,omitempty"` -} - // Global returns variables that apply to all the templates, including user provided variables // and builtin variables for the Cluster object. func Global(clusterTopology *clusterv1.Topology, cluster *clusterv1.Cluster, definitionFrom string, patchVariableDefinitions map[string]bool) ([]runtimehooksv1.Variable, error) { @@ -214,7 +44,7 @@ func Global(clusterTopology *clusterv1.Topology, cluster *clusterv1.Cluster, def // Add user defined variables from Cluster.spec.topology.variables. for _, variable := range clusterTopology.Variables { // Don't add user-defined "builtin" variable. - if variable.Name == BuiltinsName { + if variable.Name == runtimehooksv1.BuiltinsName { continue } // Add the variable if it is defined for the current patch or it is defined for all the patches. @@ -227,11 +57,11 @@ func Global(clusterTopology *clusterv1.Topology, cluster *clusterv1.Cluster, def } // Construct builtin variable. - builtin := Builtins{ - Cluster: &ClusterBuiltins{ + builtin := runtimehooksv1.Builtins{ + Cluster: &runtimehooksv1.ClusterBuiltins{ Name: cluster.Name, Namespace: cluster.Namespace, - Topology: &ClusterTopologyBuiltins{ + Topology: &runtimehooksv1.ClusterTopologyBuiltins{ Version: cluster.Spec.Topology.Version, Class: cluster.Spec.Topology.Class, }, @@ -239,7 +69,7 @@ func Global(clusterTopology *clusterv1.Topology, cluster *clusterv1.Cluster, def } if cluster.Spec.ClusterNetwork != nil { clusterNetworkIPFamily, _ := cluster.GetIPFamily() - builtin.Cluster.Network = &ClusterNetworkBuiltins{ + builtin.Cluster.Network = &runtimehooksv1.ClusterNetworkBuiltins{ IPFamily: ipFamilyToString(clusterNetworkIPFamily), } if cluster.Spec.ClusterNetwork.ServiceDomain != "" { @@ -254,7 +84,7 @@ func Global(clusterTopology *clusterv1.Topology, cluster *clusterv1.Cluster, def } // Add builtin variables derived from the cluster object. - variable, err := toVariable(BuiltinsName, builtin) + variable, err := toVariable(runtimehooksv1.BuiltinsName, builtin) if err != nil { return nil, err } @@ -268,8 +98,8 @@ func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructu variables := []runtimehooksv1.Variable{} // Construct builtin variable. 
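+	// Once marshaled, this yields a "builtin" variable value along the lines of
+	//   {"controlPlane":{"version":"v1.21.1","name":"controlPlane1","replicas":3}}
+	// (illustrative values only; see TestControlPlane below).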
- builtin := Builtins{ - ControlPlane: &ControlPlaneBuiltins{ + builtin := runtimehooksv1.Builtins{ + ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ Name: cp.GetName(), }, } @@ -293,14 +123,14 @@ func ControlPlane(cpTopology *clusterv1.ControlPlaneTopology, cp, cpInfrastructu builtin.ControlPlane.Version = *version if cpInfrastructureMachineTemplate != nil { - builtin.ControlPlane.MachineTemplate = &ControlPlaneMachineTemplateBuiltins{ - InfrastructureRef: ControlPlaneMachineTemplateInfrastructureRefBuiltins{ + builtin.ControlPlane.MachineTemplate = &runtimehooksv1.ControlPlaneMachineTemplateBuiltins{ + InfrastructureRef: runtimehooksv1.ControlPlaneMachineTemplateInfrastructureRefBuiltins{ Name: cpInfrastructureMachineTemplate.GetName(), }, } } - variable, err := toVariable(BuiltinsName, builtin) + variable, err := toVariable(runtimehooksv1.BuiltinsName, builtin) if err != nil { return nil, err } @@ -327,8 +157,8 @@ func MachineDeployment(mdTopology *clusterv1.MachineDeploymentTopology, md *clus } // Construct builtin variable. - builtin := Builtins{ - MachineDeployment: &MachineDeploymentBuiltins{ + builtin := runtimehooksv1.Builtins{ + MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ Version: *md.Spec.Template.Spec.Version, Class: mdTopology.Class, Name: md.Name, @@ -340,20 +170,20 @@ func MachineDeployment(mdTopology *clusterv1.MachineDeploymentTopology, md *clus } if mdBootstrapTemplate != nil { - builtin.MachineDeployment.Bootstrap = &MachineBootstrapBuiltins{ - ConfigRef: &MachineBootstrapConfigRefBuiltins{ + builtin.MachineDeployment.Bootstrap = &runtimehooksv1.MachineBootstrapBuiltins{ + ConfigRef: &runtimehooksv1.MachineBootstrapConfigRefBuiltins{ Name: mdBootstrapTemplate.GetName(), }, } } if mdInfrastructureMachineTemplate != nil { - builtin.MachineDeployment.InfrastructureRef = &MachineInfrastructureRefBuiltins{ + builtin.MachineDeployment.InfrastructureRef = &runtimehooksv1.MachineInfrastructureRefBuiltins{ Name: mdInfrastructureMachineTemplate.GetName(), } } - variable, err := toVariable(BuiltinsName, builtin) + variable, err := toVariable(runtimehooksv1.BuiltinsName, builtin) if err != nil { return nil, err } @@ -380,8 +210,8 @@ func MachinePool(mpTopology *clusterv1.MachinePoolTopology, mp *expv1.MachinePoo } // Construct builtin variable. 
- builtin := Builtins{ - MachinePool: &MachinePoolBuiltins{ + builtin := runtimehooksv1.Builtins{ + MachinePool: &runtimehooksv1.MachinePoolBuiltins{ Version: *mp.Spec.Template.Spec.Version, Class: mpTopology.Class, Name: mp.Name, @@ -393,20 +223,20 @@ func MachinePool(mpTopology *clusterv1.MachinePoolTopology, mp *expv1.MachinePoo } if mpBootstrapObject != nil { - builtin.MachinePool.Bootstrap = &MachineBootstrapBuiltins{ - ConfigRef: &MachineBootstrapConfigRefBuiltins{ + builtin.MachinePool.Bootstrap = &runtimehooksv1.MachineBootstrapBuiltins{ + ConfigRef: &runtimehooksv1.MachineBootstrapConfigRefBuiltins{ Name: mpBootstrapObject.GetName(), }, } } if mpInfrastructureMachinePool != nil { - builtin.MachinePool.InfrastructureRef = &MachineInfrastructureRefBuiltins{ + builtin.MachinePool.InfrastructureRef = &runtimehooksv1.MachineInfrastructureRefBuiltins{ Name: mpInfrastructureMachinePool.GetName(), } } - variable, err := toVariable(BuiltinsName, builtin) + variable, err := toVariable(runtimehooksv1.BuiltinsName, builtin) if err != nil { return nil, err } @@ -440,12 +270,3 @@ func ipFamilyToString(ipFamily clusterv1.ClusterIPFamily) string { return "Invalid" } } - -// ToMap converts a list of Variables to a map of JSON (name is the map key). -func ToMap(variables []runtimehooksv1.Variable) map[string]apiextensionsv1.JSON { - variablesMap := map[string]apiextensionsv1.JSON{} - for i := range variables { - variablesMap[variables[i].Name] = variables[i].Value - } - return variablesMap -} diff --git a/internal/controllers/topology/cluster/patches/variables/variables_test.go b/internal/controllers/topology/cluster/patches/variables/variables_test.go index 7b6341deb80e..52ff8b4fee77 100644 --- a/internal/controllers/topology/cluster/patches/variables/variables_test.go +++ b/internal/controllers/topology/cluster/patches/variables/variables_test.go @@ -95,7 +95,7 @@ func TestGlobal(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "cluster":{ "name": "cluster1", @@ -176,7 +176,7 @@ func TestGlobal(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "cluster":{ "name": "cluster1", @@ -247,7 +247,7 @@ func TestGlobal(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "cluster":{ "name": "cluster1", @@ -314,7 +314,7 @@ func TestGlobal(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "cluster":{ "name": "cluster1", @@ -376,7 +376,7 @@ func TestGlobal(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "cluster":{ "name": "cluster1", @@ -420,7 +420,7 @@ func TestControlPlane(t *testing.T) { Build(), want: []runtimehooksv1.Variable{ { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "controlPlane":{ "version": "v1.21.1", @@ -438,7 +438,7 @@ func TestControlPlane(t *testing.T) { Build(), want: []runtimehooksv1.Variable{ { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "controlPlane":{ "version": "v1.21.1", @@ -460,7 +460,7 @@ func TestControlPlane(t *testing.T) { Build(), want: []runtimehooksv1.Variable{ { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "controlPlane":{ "version": "v1.21.1", @@ -533,7 +533,7 @@ func TestMachineDeployment(t 
*testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -598,7 +598,7 @@ func TestMachineDeployment(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -625,7 +625,7 @@ func TestMachineDeployment(t *testing.T) { Build(), want: []runtimehooksv1.Variable{ { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -670,7 +670,7 @@ func TestMachineDeployment(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -717,7 +717,7 @@ func TestMachineDeployment(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -770,7 +770,7 @@ func TestMachineDeployment(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -822,7 +822,7 @@ func TestMachineDeployment(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machineDeployment":{ "version": "v1.21.1", @@ -900,7 +900,7 @@ func TestMachinePool(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", @@ -965,7 +965,7 @@ func TestMachinePool(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", @@ -992,7 +992,7 @@ func TestMachinePool(t *testing.T) { Build(), want: []runtimehooksv1.Variable{ { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", @@ -1037,7 +1037,7 @@ func TestMachinePool(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", @@ -1084,7 +1084,7 @@ func TestMachinePool(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", @@ -1137,7 +1137,7 @@ func TestMachinePool(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", @@ -1189,7 +1189,7 @@ func TestMachinePool(t *testing.T) { Value: toJSON("8"), }, { - Name: BuiltinsName, + Name: runtimehooksv1.BuiltinsName, Value: toJSONCompact(`{ "machinePool":{ "version": "v1.21.1", diff --git a/test/extension/handlers/topologymutation/handler_test.go b/test/extension/handlers/topologymutation/handler_test.go index 9af73565c421..1b693ee064f3 100644 --- a/test/extension/handlers/topologymutation/handler_test.go +++ b/test/extension/handlers/topologymutation/handler_test.go @@ -33,7 +33,6 @@ import ( bootstrapv1 "sigs.k8s.io/cluster-api/bootstrap/kubeadm/api/v1beta1" controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" infrav1 
"sigs.k8s.io/cluster-api/test/infrastructure/docker/api/v1beta1" ) @@ -118,8 +117,8 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { name: "sets KubeletExtraArgs[cgroup-driver] to cgroupfs for Kubernetes < 1.24", template: &controlplanev1.KubeadmControlPlaneTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - ControlPlane: &variables.ControlPlaneBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ Version: "v1.23.0", }, })}, @@ -149,8 +148,8 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { name: "do not set KubeletExtraArgs[cgroup-driver] to cgroupfs for Kubernetes >= 1.24", template: &controlplanev1.KubeadmControlPlaneTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - ControlPlane: &variables.ControlPlaneBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ Version: "v1.24.0", }, })}, @@ -161,8 +160,8 @@ func Test_patchKubeadmControlPlaneTemplate(t *testing.T) { name: "sets RolloutStrategy.RollingUpdate.MaxSurge if the kubeadmControlPlaneMaxSurge is provided", template: &controlplanev1.KubeadmControlPlaneTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - ControlPlane: &variables.ControlPlaneBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ Version: "v1.24.0", }, })}, @@ -215,8 +214,8 @@ func Test_patchKubeadmConfigTemplate(t *testing.T) { name: "no op for MachineDeployment class != default-worker", template: &bootstrapv1.KubeadmConfigTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - MachineDeployment: &variables.MachineDeploymentBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ Class: "another-class", }, })}, @@ -227,8 +226,8 @@ func Test_patchKubeadmConfigTemplate(t *testing.T) { name: "fails if builtin.machineDeployment.version is not set for MachineDeployment class == default-worker", template: &bootstrapv1.KubeadmConfigTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - MachineDeployment: &variables.MachineDeploymentBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ Class: "default-worker", }, })}, @@ -240,8 +239,8 @@ func Test_patchKubeadmConfigTemplate(t *testing.T) { name: "set KubeletExtraArgs[cgroup-driver] to cgroupfs for Kubernetes < 1.24 and MachineDeployment class == default-worker", template: &bootstrapv1.KubeadmConfigTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - MachineDeployment: &variables.MachineDeploymentBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ Class: "default-worker", Version: "v1.23.0", }, @@ -265,8 +264,8 @@ func Test_patchKubeadmConfigTemplate(t *testing.T) { name: "do not set KubeletExtraArgs[cgroup-driver] to cgroupfs for Kubernetes >= 1.24 and MachineDeployment class == default-worker", template: 
&bootstrapv1.KubeadmConfigTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - MachineDeployment: &variables.MachineDeploymentBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ Class: "default-worker", Version: "v1.24.0", }, @@ -309,8 +308,8 @@ func Test_patchDockerMachineTemplate(t *testing.T) { name: "sets customImage for templates linked to ControlPlane", template: &infrav1.DockerMachineTemplate{}, variables: map[string]apiextensionsv1.JSON{ - variables.BuiltinsName: {Raw: toJSON(variables.Builtins{ - ControlPlane: &variables.ControlPlaneBuiltins{ + runtimehooksv1.BuiltinsName: {Raw: toJSON(runtimehooksv1.Builtins{ + ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ Version: "v1.23.0", }, })}, @@ -346,8 +345,8 @@ func TestHandler_GeneratePatches(t *testing.T) { g := NewWithT(t) h := NewExtensionHandlers(testScheme) controlPlaneVarsV123WithMaxSurge := []runtimehooksv1.Variable{ - newVariable(variables.BuiltinsName, variables.Builtins{ - ControlPlane: &variables.ControlPlaneBuiltins{ + newVariable(runtimehooksv1.BuiltinsName, runtimehooksv1.Builtins{ + ControlPlane: &runtimehooksv1.ControlPlaneBuiltins{ Version: "v1.23.0", }, }), @@ -357,8 +356,8 @@ func TestHandler_GeneratePatches(t *testing.T) { newVariable("imageRepository", "docker.io"), } machineDeploymentVars123 := []runtimehooksv1.Variable{ - newVariable(variables.BuiltinsName, variables.Builtins{ - MachineDeployment: &variables.MachineDeploymentBuiltins{ + newVariable(runtimehooksv1.BuiltinsName, runtimehooksv1.Builtins{ + MachineDeployment: &runtimehooksv1.MachineDeploymentBuiltins{ Class: "default-worker", Version: "v1.23.0", }, From 6f57038cb37f7b5a85f5eec8e5b9c241bf851c37 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Wed, 27 Mar 2024 12:57:18 +0100 Subject: [PATCH 03/10] Export webhooks.Cluster DefaultAndValidateVariables method MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stefan Büringer buringerst@vmware.com --- webhooks/alias.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/webhooks/alias.go b/webhooks/alias.go index 1ad94a530a44..1d1271bb1530 100644 --- a/webhooks/alias.go +++ b/webhooks/alias.go @@ -17,9 +17,11 @@ limitations under the License. package webhooks import ( + "k8s.io/apimachinery/pkg/util/validation/field" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/webhooks" ) @@ -41,6 +43,19 @@ func (webhook *Cluster) SetupWebhookWithManager(mgr ctrl.Manager) error { }).SetupWebhookWithManager(mgr) } +// DefaultAndValidateVariables can be used to default and validate variables of a Cluster +// based on the corresponding ClusterClass. +// Before it can be used, all fields of the webhooks.Cluster have to be set +// and SetupWithManager has to be called. +// This method can be used when testing the behavior of the desired state computation of +// the Cluster topology controller (because variables are always defaulted and validated +// before the desired state is computed). +func (webhook *Cluster) DefaultAndValidateVariables(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) field.ErrorList { + // As of today this func is not a method on internal/webhooks.Cluster because it doesn't use + // any of its fields. 
But it seems more consistent and future-proof to expose it as a method. + return webhooks.DefaultAndValidateVariables(cluster, clusterClass) +} + // ClusterClass implements a validation and defaulting webhook for ClusterClass. type ClusterClass struct { Client client.Reader From 9f6cca48a993ce2d95aa2eb7b281fe2492c2fde9 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Wed, 27 Mar 2024 16:04:21 +0100 Subject: [PATCH 04/10] Export desired state computation --- .../util/topology}/desired_state.go | 179 ++++++++++-------- .../util/topology}/desired_state_test.go | 120 ++++++++---- .../topology/cluster/cluster_controller.go | 12 +- .../topology/cluster/current_state_test.go | 5 +- .../topology/cluster/reconcile_state.go | 62 ++---- .../topology/cluster/reconcile_state_test.go | 46 +++-- internal/controllers/topology/cluster/util.go | 26 --- internal/topology/clustershim/clustershim.go | 47 +++++ internal/topology/names/names.go | 25 +++ internal/topology/ownerrefs/ownerref.go | 46 +++++ internal/topology/selectors/selectors.go | 47 +++++ 11 files changed, 393 insertions(+), 222 deletions(-) rename {internal/controllers/topology/cluster => exp/util/topology}/desired_state.go (90%) rename {internal/controllers/topology/cluster => exp/util/topology}/desired_state_test.go (97%) create mode 100644 internal/topology/clustershim/clustershim.go create mode 100644 internal/topology/ownerrefs/ownerref.go create mode 100644 internal/topology/selectors/selectors.go diff --git a/internal/controllers/topology/cluster/desired_state.go b/exp/util/topology/desired_state.go similarity index 90% rename from internal/controllers/topology/cluster/desired_state.go rename to exp/util/topology/desired_state.go index 0060d0849f95..1aad9497d085 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/exp/util/topology/desired_state.go @@ -14,7 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cluster +// Package topology contains cluster topology utils, e.g. to compute the desired state. +package topology import ( "context" @@ -30,24 +31,60 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/controllers/external" + "sigs.k8s.io/cluster-api/controllers/remote" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" + "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/hooks" tlog "sigs.k8s.io/cluster-api/internal/log" - "sigs.k8s.io/cluster-api/internal/topology/names" + runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client" + "sigs.k8s.io/cluster-api/internal/topology/clustershim" + topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/internal/topology/ownerrefs" + "sigs.k8s.io/cluster-api/internal/topology/selectors" "sigs.k8s.io/cluster-api/internal/webhooks" "sigs.k8s.io/cluster-api/util" ) -// computeDesiredState computes the desired state of the cluster topology. +// DesiredStateEngine is an engine to compute the desired state.
+type DesiredStateEngine interface { + ComputeDesiredState(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) + + IsControlPlaneStable(s *scope.Scope) bool +} + +// NewDesiredStateEngine creates a new desired state engine. +func NewDesiredStateEngine(client client.Client, tracker *remote.ClusterCacheTracker, runtimeClient runtimeclient.Client) DesiredStateEngine { + return &desiredStateEngine{ + Client: client, + Tracker: tracker, + RuntimeClient: runtimeClient, + patchEngine: patches.NewEngine(runtimeClient), + } +} + +// desiredStateEngine is an engine to compute the desired state. +// It is used in the cluster topology controller, but it can also be used for testing. +type desiredStateEngine struct { + Client client.Client + + Tracker *remote.ClusterCacheTracker + + RuntimeClient runtimeclient.Client + + // patchEngine is used to apply patches during ComputeDesiredState. + patchEngine patches.Engine +} + +// ComputeDesiredState computes the desired state of the cluster topology. // NOTE: We are assuming all the required objects are provided as input; also, in case of any error, // the entire compute operation will fail. This might be improved in the future if support for reconciling // subset of a topology will be implemented. -func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) { +func (e *desiredStateEngine) ComputeDesiredState(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) { var err error desiredState := &scope.ClusterState{ ControlPlane: &scope.ControlPlaneState{}, @@ -70,7 +107,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // - Building the TopologyReconciled condition. // - Make upgrade decisions on the control plane. // - Making upgrade decisions on machine deployments. - mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, r.Client) + mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, e.Client) if err != nil { return nil, errors.Wrap(err, "failed to check if any MachineDeployment is upgrading") } @@ -82,7 +119,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // - Make upgrade decisions on the control plane. // - Making upgrade decisions on machine pools. if len(s.Current.MachinePools) > 0 { - client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster)) + client, err := e.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster)) if err != nil { return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading") } @@ -96,7 +133,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // Compute the desired state of the ControlPlane object, eventually adding a reference to the // InfrastructureMachineTemplate generated by the previous step.
- if desiredState.ControlPlane.Object, err = r.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil { + if desiredState.ControlPlane.Object, err = e.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil { return nil, errors.Wrapf(err, "failed to compute ControlPlane") } @@ -106,7 +143,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* desiredState.ControlPlane.MachineHealthCheck = computeMachineHealthCheck( ctx, desiredState.ControlPlane.Object, - selectorForControlPlaneMHC(), + selectors.ForControlPlaneMHC(), s.Current.Cluster, s.Blueprint.ControlPlaneMachineHealthCheckClass()) } @@ -121,7 +158,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies // defined in the cluster. if s.Blueprint.HasMachineDeployments() { - desiredState.MachineDeployments, err = r.computeMachineDeployments(ctx, s) + desiredState.MachineDeployments, err = e.computeMachineDeployments(ctx, s) if err != nil { return nil, errors.Wrapf(err, "failed to compute MachineDeployments") } @@ -130,7 +167,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // If required, compute the desired state of the MachinePools from the list of MachinePoolTopologies // defined in the cluster. if s.Blueprint.HasMachinePools() { - desiredState.MachinePools, err = r.computeMachinePools(ctx, s) + desiredState.MachinePools, err = e.computeMachinePools(ctx, s) if err != nil { return nil, errors.Wrapf(err, "failed to compute MachinePools") } @@ -142,7 +179,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* // are preserved during patching. When desired objects are computed their spec is copied from a template, in some cases // further modifications to the spec are made afterwards. In those cases we have to make sure those fields are not overwritten // in apply patches. Some examples are .spec.machineTemplate and .spec.version in control planes. - if err := r.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil { + if err := e.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil { return nil, errors.Wrap(err, "failed to apply patches") } @@ -161,7 +198,7 @@ func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu template: template, templateClonedFromRef: templateClonedFromRef, cluster: cluster, - nameGenerator: names.SimpleNameGenerator(fmt.Sprintf("%s-", cluster.Name)), + nameGenerator: topologynames.SimpleNameGenerator(fmt.Sprintf("%s-", cluster.Name)), currentObjectRef: currentRef, // Note: It is not possible to add an ownerRef to Cluster at this stage, otherwise the provisioning // of the infrastructure cluster starts no matter of the object being actually referenced by the Cluster itself. @@ -173,7 +210,7 @@ func computeInfrastructureCluster(_ context.Context, s *scope.Scope) (*unstructu // Carry over shim owner reference if any. // NOTE: this prevents to the ownerRef to be deleted by server side apply. 
if s.Current.InfrastructureCluster != nil { - shim := clusterShim(s.Current.Cluster) + shim := clustershim.New(s.Current.Cluster) if ref := getOwnerReferenceFrom(s.Current.InfrastructureCluster, shim); ref != nil { infrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ref}) } @@ -203,18 +240,18 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop template: template, templateClonedFromRef: templateClonedFromRef, cluster: cluster, - nameGenerator: names.SimpleNameGenerator(controlPlaneInfrastructureMachineTemplateNamePrefix(cluster.Name)), + nameGenerator: topologynames.SimpleNameGenerator(topologynames.ControlPlaneInfrastructureMachineTemplateNamePrefix(cluster.Name)), currentObjectRef: currentRef, // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected // in case of errors in between creating this template and updating the Cluster object // with the reference to the ControlPlane object using this template. - ownerRef: ownerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), + ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), }) } // computeControlPlane computes the desired state for the ControlPlane object starting from the // corresponding template defined in the blueprint. -func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) { +func (e *desiredStateEngine) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) { template := s.Blueprint.ControlPlane.Template templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref cluster := s.Current.Cluster @@ -244,7 +281,7 @@ func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, in template: template, templateClonedFromRef: templateClonedFromRef, cluster: cluster, - nameGenerator: names.ControlPlaneNameGenerator(nameTemplate, cluster.Name), + nameGenerator: topologynames.ControlPlaneNameGenerator(nameTemplate, cluster.Name), currentObjectRef: currentRef, labels: controlPlaneLabels, annotations: controlPlaneAnnotations, @@ -258,7 +295,7 @@ func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, in // Carry over shim owner reference if any. // NOTE: this prevents to the ownerRef to be deleted by server side apply. if s.Current.ControlPlane != nil && s.Current.ControlPlane.Object != nil { - shim := clusterShim(s.Current.Cluster) + shim := clustershim.New(s.Current.Cluster) if ref := getOwnerReferenceFrom(s.Current.ControlPlane.Object, shim); ref != nil { controlPlane.SetOwnerReferences([]metav1.OwnerReference{*ref}) } @@ -349,7 +386,7 @@ func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, in } // Sets the desired Kubernetes version for the control plane. - version, err := r.computeControlPlaneVersion(ctx, s) + version, err := e.computeControlPlaneVersion(ctx, s) if err != nil { return nil, errors.Wrap(err, "failed to compute version of control plane") } @@ -363,7 +400,7 @@ func (r *Reconciler) computeControlPlane(ctx context.Context, s *scope.Scope, in // computeControlPlaneVersion calculates the version of the desired control plane. // The version is calculated using the state of the current machine deployments, the current control plane // and the version defined in the topology. 
-func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) { +func (e *desiredStateEngine) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) { log := tlog.LoggerFrom(ctx) desiredVersion := s.Blueprint.Topology.Version // If we are creating the control plane object (current control plane is nil), use version from topology. @@ -432,7 +469,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc KubernetesVersion: desiredVersion, } hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{} - if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil { + if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil { return "", err } // Add the response to the tracker so we can later update condition or requeue when required. @@ -444,7 +481,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc if hookResponse.RetryAfterSeconds != 0 { log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade)) } else { - if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil { + if err := hooks.MarkAsDone(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil { return "", err } } @@ -485,7 +522,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc ToKubernetesVersion: desiredVersion, } hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{} - if err := r.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil { + if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil { return "", err } // Add the response to the tracker so we can later update condition or requeue when required. @@ -498,7 +535,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc // We are picking up the new version here. // Track the intent of calling the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks once we are done with the upgrade. - if err := hooks.MarkAsPending(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil { + if err := hooks.MarkAsPending(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil { return "", err } } @@ -570,10 +607,10 @@ func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredRe } // computeMachineDeployments computes the desired state of the list of MachineDeployments. 
-func (r *Reconciler) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) { +func (e *desiredStateEngine) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) { machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap) for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments { - desiredMachineDeployment, err := computeMachineDeployment(ctx, s, mdTopology) + desiredMachineDeployment, err := e.computeMachineDeployment(ctx, s, mdTopology) if err != nil { return nil, errors.Wrapf(err, "failed to compute MachineDepoyment for topology %q", mdTopology.Name) } @@ -585,7 +622,7 @@ func (r *Reconciler) computeMachineDeployments(ctx context.Context, s *scope.Sco // computeMachineDeployment computes the desired state for a MachineDeploymentTopology. // The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and // the machineDeployment class. -func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) { +func (e *desiredStateEngine) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) { desiredMachineDeployment := &scope.MachineDeploymentState{} // Gets the blueprint for the MachineDeployment class. @@ -618,12 +655,12 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy template: machineDeploymentBlueprint.BootstrapTemplate, templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.BootstrapTemplate), cluster: s.Current.Cluster, - nameGenerator: names.SimpleNameGenerator(bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)), + nameGenerator: topologynames.SimpleNameGenerator(topologynames.BootstrapTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)), currentObjectRef: currentBootstrapTemplateRef, // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected // in case of errors in between creating this template and creating/updating the MachineDeployment object // with the reference to this template. - ownerRef: ownerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), + ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), }) if err != nil { return nil, err @@ -646,12 +683,12 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy template: machineDeploymentBlueprint.InfrastructureMachineTemplate, templateClonedFromRef: contract.ObjToRef(machineDeploymentBlueprint.InfrastructureMachineTemplate), cluster: s.Current.Cluster, - nameGenerator: names.SimpleNameGenerator(infrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)), + nameGenerator: topologynames.SimpleNameGenerator(topologynames.InfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name, machineDeploymentTopology.Name)), currentObjectRef: currentInfraMachineTemplateRef, // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected // in case of errors in between creating this template and creating/updating the MachineDeployment object // with the reference to this template. 
- ownerRef: ownerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), + ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), }) if err != nil { return nil, err @@ -664,7 +701,7 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy // Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels) - version := computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment) + version := e.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment) // Compute values that can be set both in the MachineDeploymentClass and in the MachineDeploymentTopology minReadySeconds := machineDeploymentClass.MinReadySeconds @@ -712,7 +749,7 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy nameTemplate = *machineDeploymentClass.NamingStrategy.Template } - name, err := names.MachineDeploymentNameGenerator(nameTemplate, s.Current.Cluster.Name, machineDeploymentTopology.Name).GenerateName() + name, err := topologynames.MachineDeploymentNameGenerator(nameTemplate, s.Current.Cluster.Name, machineDeploymentTopology.Name).GenerateName() if err != nil { return nil, errors.Wrap(err, "failed to generate name for MachineDeployment") } @@ -795,7 +832,7 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy desiredMachineDeployment.MachineHealthCheck = computeMachineHealthCheck( ctx, desiredMachineDeploymentObj, - selectorForMachineDeploymentMHC(desiredMachineDeploymentObj), + selectors.ForMachineDeploymentMHC(desiredMachineDeploymentObj), s.Current.Cluster, s.Blueprint.MachineDeploymentMachineHealthCheckClass(&machineDeploymentTopology)) } @@ -805,14 +842,14 @@ func computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploy // computeMachineDeploymentVersion calculates the version of the desired machine deployment. // The version is calculated using the state of the current machine deployments, // the current control plane and the version defined in the topology. -func computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string { +func (e *desiredStateEngine) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string { desiredVersion := s.Blueprint.Topology.Version // If creating a new machine deployment, mark it as pending if the control plane is not // yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions. // Example: join could fail if the load balancers are slow in detecting when CP machines are // being deleted. 
if currentMDState == nil || currentMDState.Object == nil { - if !isControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) { + if !e.IsControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) { s.UpgradeTracker.MachineDeployments.MarkPendingCreate(machineDeploymentTopology.Name) } return desiredVersion @@ -849,7 +886,7 @@ func computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology c // Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet. // Return the current version of the machine deployment. We will pick up the new version after the control // plane is stable. - if !isControlPlaneStable(s) { + if !e.IsControlPlaneStable(s) { s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name) return currentVersion } @@ -860,8 +897,8 @@ func computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology c return desiredVersion } -// isControlPlaneStable returns true is the ControlPlane is stable. -func isControlPlaneStable(s *scope.Scope) bool { +// IsControlPlaneStable returns true if the ControlPlane is stable. +func (e *desiredStateEngine) IsControlPlaneStable(s *scope.Scope) bool { // If the current control plane is upgrading it is not considered stable. if s.UpgradeTracker.ControlPlane.IsUpgrading { return false @@ -924,10 +961,10 @@ func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology } // computeMachinePools computes the desired state of the list of MachinePools. -func (r *Reconciler) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) { +func (e *desiredStateEngine) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) { machinePoolsStateMap := make(scope.MachinePoolsStateMap) for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools { - desiredMachinePool, err := computeMachinePool(ctx, s, mpTopology) + desiredMachinePool, err := e.computeMachinePool(ctx, s, mpTopology) if err != nil { return nil, errors.Wrapf(err, "failed to compute MachinePool for topology %q", mpTopology.Name) } @@ -939,7 +976,7 @@ func (r *Reconciler) computeMachinePools(ctx context.Context, s *scope.Scope) (s // computeMachinePool computes the desired state for a MachinePoolTopology. // The generated machinePool object is calculated using the values from the machinePoolTopology and // the machinePool class. -func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) { +func (e *desiredStateEngine) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) { desiredMachinePool := &scope.MachinePoolState{} // Gets the blueprint for the MachinePool class.
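The hunks above thread the stability check through the worker version computation: a MachineDeployment (and, below, a MachinePool) only picks up a new version, and new worker topologies are only created, once IsControlPlaneStable returns true and no AfterControlPlaneUpgrade hook is blocking. A minimal sketch of how an in-tree caller could reuse the now-exported check (a hypothetical helper, not part of this patch; c, tracker and rc are assumed to be wired up by the caller, and the scope package is still internal, so this only compiles inside the cluster-api module):

```go
package topology_test

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"

	"sigs.k8s.io/cluster-api/controllers/remote"
	"sigs.k8s.io/cluster-api/exp/util/topology"
	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
	runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client"
)

// computeIfStable is a hypothetical helper: it only computes the desired state
// once the control plane is stable, mirroring the gating applied to worker
// version bumps in the hunks above.
func computeIfStable(ctx context.Context, c client.Client, tracker *remote.ClusterCacheTracker, rc runtimeclient.Client, s *scope.Scope) (*scope.ClusterState, error) {
	e := topology.NewDesiredStateEngine(c, tracker, rc)
	if !e.IsControlPlaneStable(s) {
		// Callers would typically requeue here and retry after the control
		// plane has finished upgrading, scaling, or provisioning.
		return nil, nil
	}
	return e.ComputeDesiredState(ctx, s)
}
```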
@@ -972,12 +1009,12 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c template: machinePoolBlueprint.BootstrapTemplate, templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapTemplate), cluster: s.Current.Cluster, - nameGenerator: names.SimpleNameGenerator(bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)), + nameGenerator: topologynames.SimpleNameGenerator(topologynames.BootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)), currentObjectRef: currentBootstrapConfigRef, // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected // in case of errors in between creating this template and creating/updating the MachinePool object // with the reference to this template. - ownerRef: ownerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), + ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), }) if err != nil { return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name) @@ -1000,12 +1037,12 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c template: machinePoolBlueprint.InfrastructureMachinePoolTemplate, templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.InfrastructureMachinePoolTemplate), cluster: s.Current.Cluster, - nameGenerator: names.SimpleNameGenerator(infrastructureMachinePoolNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)), + nameGenerator: topologynames.SimpleNameGenerator(topologynames.InfrastructureMachinePoolNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name)), currentObjectRef: currentInfraMachinePoolRef, // Note: we are adding an ownerRef to Cluster so the template will be automatically garbage collected // in case of errors in between creating this template and creating/updating the MachinePool object // with the reference to this template. 
- ownerRef: ownerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), + ownerRef: ownerrefs.OwnerReferenceTo(s.Current.Cluster, clusterv1.GroupVersion.WithKind("Cluster")), }) if err != nil { return nil, errors.Wrapf(err, "failed to compute infrastructure object for topology %q", machinePoolTopology.Name) @@ -1018,7 +1055,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c // Add ClusterTopologyMachinePoolLabel to the generated InfrastructureMachinePool object infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels) - version := computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool) + version := e.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool) // Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology minReadySeconds := machinePoolClass.MinReadySeconds @@ -1061,7 +1098,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c nameTemplate = *machinePoolClass.NamingStrategy.Template } - name, err := names.MachinePoolNameGenerator(nameTemplate, s.Current.Cluster.Name, machinePoolTopology.Name).GenerateName() + name, err := topologynames.MachinePoolNameGenerator(nameTemplate, s.Current.Cluster.Name, machinePoolTopology.Name).GenerateName() if err != nil { return nil, errors.Wrap(err, "failed to generate name for MachinePool") } @@ -1135,14 +1172,14 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c // computeMachinePoolVersion calculates the version of the desired machine pool. // The version is calculated using the state of the current machine pools, // the current control plane and the version defined in the topology. -func computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string { +func (e *desiredStateEngine) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string { desiredVersion := s.Blueprint.Topology.Version // If creating a new machine pool, mark it as pending if the control plane is not // yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions. // Example: join could fail if the load balancers are slow in detecting when CP machines are // being deleted. if currentMPState == nil || currentMPState.Object == nil { - if !isControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) { + if !e.IsControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) { s.UpgradeTracker.MachinePools.MarkPendingCreate(machinePoolTopology.Name) } return desiredVersion @@ -1179,7 +1216,7 @@ func computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.Mac // Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet. // Return the current version of the machine pool. We will pick up the new version after the control // plane is stable. 
- if !isControlPlaneStable(s) { + if !e.IsControlPlaneStable(s) { s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name) return currentVersion } @@ -1229,7 +1266,7 @@ type templateToInput struct { template *unstructured.Unstructured templateClonedFromRef *corev1.ObjectReference cluster *clusterv1.Cluster - nameGenerator names.NameGenerator + nameGenerator topologynames.NameGenerator currentObjectRef *corev1.ObjectReference labels map[string]string annotations map[string]string @@ -1345,18 +1382,6 @@ func templateToTemplate(in templateToInput) (*unstructured.Unstructured, error) return template, nil } -// ownerReferenceTo converts an object to an OwnerReference. -// Note: We pass in gvk explicitly as we can't rely on GVK being set on all objects -// (only on Unstructured). -func ownerReferenceTo(obj client.Object, gvk schema.GroupVersionKind) *metav1.OwnerReference { - return &metav1.OwnerReference{ - APIVersion: gvk.GroupVersion().String(), - Kind: gvk.Kind, - Name: obj.GetName(), - UID: obj.GetUID(), - } -} - func computeMachineHealthCheck(ctx context.Context, healthCheckTarget client.Object, selector *metav1.LabelSelector, cluster *clusterv1.Cluster, check *clusterv1.MachineHealthCheckClass) *clusterv1.MachineHealthCheck { // Create a MachineHealthCheck with the spec given in the ClusterClass. mhc := &clusterv1.MachineHealthCheck{ @@ -1373,7 +1398,7 @@ func computeMachineHealthCheck(ctx context.Context, healthCheckTarget client.Obj // Note: we are adding an ownerRef to Cluster so the MHC will be automatically garbage collected // in case deletion is triggered before an object reconcile happens. OwnerReferences: []metav1.OwnerReference{ - *ownerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")), + *ownerrefs.OwnerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")), }, }, Spec: clusterv1.MachineHealthCheckSpec{ @@ -1396,23 +1421,11 @@ func computeMachineHealthCheck(ctx context.Context, healthCheckTarget client.Obj return mhc } -func selectorForControlPlaneMHC() *metav1.LabelSelector { - // The selector returned here is the minimal common selector for all Machines belonging to the ControlPlane. - // It does not include any labels set in ClusterClass, Cluster Topology or elsewhere. - return &metav1.LabelSelector{ - MatchLabels: map[string]string{ - clusterv1.ClusterTopologyOwnedLabel: "", - clusterv1.MachineControlPlaneLabel: "", - }, - } -} - -func selectorForMachineDeploymentMHC(md *clusterv1.MachineDeployment) *metav1.LabelSelector { - // The selector returned here is the minimal common selector for all MachineSets belonging to a MachineDeployment. - // It does not include any labels set in ClusterClass, Cluster Topology or elsewhere. 
- return &metav1.LabelSelector{MatchLabels: map[string]string{ - clusterv1.ClusterTopologyOwnedLabel: "", - clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel], - }, +func getOwnerReferenceFrom(obj, owner client.Object) *metav1.OwnerReference { + for _, o := range obj.GetOwnerReferences() { + if o.Kind == owner.GetObjectKind().GroupVersionKind().Kind && o.Name == owner.GetName() { + return &o + } } + return nil } diff --git a/internal/controllers/topology/cluster/desired_state_test.go b/exp/util/topology/desired_state_test.go similarity index 97% rename from internal/controllers/topology/cluster/desired_state_test.go rename to exp/util/topology/desired_state_test.go index c9d5179ea466..f3e600d265fd 100644 --- a/internal/controllers/topology/cluster/desired_state_test.go +++ b/exp/util/topology/desired_state_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cluster +package topology import ( "strings" @@ -24,11 +24,15 @@ import ( "github.com/google/go-cmp/cmp" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" utilfeature "k8s.io/component-base/featuregate/testing" "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client/fake" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" @@ -42,10 +46,25 @@ import ( "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/internal/topology/clustershim" "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/internal/topology/ownerrefs" "sigs.k8s.io/cluster-api/util" ) +var ( + ctx = ctrl.SetupSignalHandler() + fakeScheme = runtime.NewScheme() +) + +func init() { + _ = clientgoscheme.AddToScheme(fakeScheme) + _ = clusterv1.AddToScheme(fakeScheme) + _ = apiextensionsv1.AddToScheme(fakeScheme) + _ = expv1.AddToScheme(fakeScheme) + _ = corev1.AddToScheme(fakeScheme) +} + var ( fakeRef1 = &corev1.ObjectReference{ Kind: "refKind1", @@ -135,7 +154,7 @@ func TestComputeInfrastructureCluster(t *testing.T) { }) t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) { g := NewWithT(t) - shim := clusterShim(cluster) + shim := clustershim.New(cluster) // current cluster objects for the test scenario clusterWithInfrastructureRef := cluster.DeepCopy() @@ -144,13 +163,13 @@ func TestComputeInfrastructureCluster(t *testing.T) { // aggregating current cluster objects into ClusterState (simulating getCurrentState) scope := scope.New(clusterWithInfrastructureRef) scope.Current.InfrastructureCluster = infrastructureClusterTemplate.DeepCopy() - scope.Current.InfrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ownerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))}) + scope.Current.InfrastructureCluster.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))}) scope.Blueprint = blueprint obj, err := computeInfrastructureCluster(ctx, scope) g.Expect(err).ToNot(HaveOccurred()) 
g.Expect(obj).ToNot(BeNil()) - g.Expect(hasOwnerReferenceFrom(obj, shim)).To(BeTrue()) + g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue()) }) } @@ -319,7 +338,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(cluster) scope.Blueprint = blueprint - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) @@ -380,7 +399,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(cluster) scope.Blueprint = blueprint - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) @@ -410,7 +429,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(clusterWithoutReplicas) scope.Blueprint = blueprint - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) @@ -455,7 +474,7 @@ func TestComputeControlPlane(t *testing.T) { s.Blueprint = blueprint s.Current.ControlPlane = &scope.ControlPlaneState{} - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate) g.Expect(err).ToNot(HaveOccurred()) @@ -516,7 +535,7 @@ func TestComputeControlPlane(t *testing.T) { scope := scope.New(clusterWithControlPlaneRef) scope.Blueprint = blueprint - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, scope, nil) g.Expect(err).ToNot(HaveOccurred()) @@ -586,7 +605,7 @@ func TestComputeControlPlane(t *testing.T) { Object: tt.currentControlPlane, } - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, s, nil) g.Expect(err).ToNot(HaveOccurred()) @@ -597,7 +616,7 @@ func TestComputeControlPlane(t *testing.T) { }) t.Run("Carry over the owner reference to ClusterShim, if any", func(t *testing.T) { g := NewWithT(t) - shim := clusterShim(cluster) + shim := clustershim.New(cluster) // current cluster objects clusterWithoutReplicas := cluster.DeepCopy() @@ -623,15 +642,15 @@ func TestComputeControlPlane(t *testing.T) { }). Build(), } - s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))}) + s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))}) s.Blueprint = blueprint - r := &Reconciler{} + r := &desiredStateEngine{} obj, err := r.computeControlPlane(ctx, s, nil) g.Expect(err).ToNot(HaveOccurred()) g.Expect(obj).ToNot(BeNil()) - g.Expect(hasOwnerReferenceFrom(obj, shim)).To(BeTrue()) + g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue()) }) } @@ -862,11 +881,10 @@ func TestComputeControlPlaneVersion(t *testing.T) { }). Build() - fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build() + fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build() - r := &Reconciler{ + r := &desiredStateEngine{ Client: fakeClient, - APIReader: fakeClient, RuntimeClient: runtimeClient, } version, err := r.computeControlPlaneVersion(ctx, s) @@ -1166,11 +1184,10 @@ func TestComputeControlPlaneVersion(t *testing.T) { WithCatalog(catalog). 
Build() - fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build() + fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(tt.s.Current.Cluster).Build() - r := &Reconciler{ + r := &desiredStateEngine{ Client: fakeClient, - APIReader: fakeClient, RuntimeClient: fakeRuntimeClient, } @@ -1243,11 +1260,10 @@ func TestComputeControlPlaneVersion(t *testing.T) { }). Build() - fakeClient := fake.NewClientBuilder().WithObjects(s.Current.Cluster).Build() + fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build() - r := &Reconciler{ + r := &desiredStateEngine{ Client: fakeClient, - APIReader: fakeClient, RuntimeClient: runtimeClient, } @@ -1419,7 +1435,9 @@ func TestComputeMachineDeployment(t *testing.T) { scope := scope.New(cluster) scope.Blueprint = blueprint - actual, err := computeMachineDeployment(ctx, scope, mdTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachineDeployment(ctx, scope, mdTopology) g.Expect(err).ToNot(HaveOccurred()) g.Expect(actual.BootstrapTemplate.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachineDeploymentNameLabel, "big-pool-of-machines")) @@ -1488,7 +1506,9 @@ func TestComputeMachineDeployment(t *testing.T) { // missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy } - actual, err := computeMachineDeployment(ctx, scope, mdTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachineDeployment(ctx, scope, mdTopology) g.Expect(err).ToNot(HaveOccurred()) // checking only values from CC defaults @@ -1532,7 +1552,9 @@ func TestComputeMachineDeployment(t *testing.T) { }, } - actual, err := computeMachineDeployment(ctx, s, mdTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachineDeployment(ctx, s, mdTopology) g.Expect(err).ToNot(HaveOccurred()) actualMd := actual.Object @@ -1580,7 +1602,9 @@ func TestComputeMachineDeployment(t *testing.T) { Name: "big-pool-of-machines", } - _, err := computeMachineDeployment(ctx, scope, mdTopology) + e := desiredStateEngine{} + + _, err := e.computeMachineDeployment(ctx, scope, mdTopology) g.Expect(err).To(HaveOccurred()) }) @@ -1692,7 +1716,10 @@ func TestComputeMachineDeployment(t *testing.T) { Replicas: ptr.To[int32](2), } s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...) - obj, err := computeMachineDeployment(ctx, s, mdTopology) + + e := desiredStateEngine{} + + obj, err := e.computeMachineDeployment(ctx, s, mdTopology) g.Expect(err).ToNot(HaveOccurred()) g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) }) @@ -1708,7 +1735,9 @@ func TestComputeMachineDeployment(t *testing.T) { Name: "big-pool-of-machines", } - actual, err := computeMachineDeployment(ctx, scope, mdTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachineDeployment(ctx, scope, mdTopology) g.Expect(err).ToNot(HaveOccurred()) // Check that the ClusterName and selector are set properly for the MachineHealthCheck. 
g.Expect(actual.MachineHealthCheck.Spec.ClusterName).To(Equal(cluster.Name)) @@ -1817,7 +1846,9 @@ func TestComputeMachinePool(t *testing.T) { scope := scope.New(cluster) scope.Blueprint = blueprint - actual, err := computeMachinePool(ctx, scope, mpTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachinePool(ctx, scope, mpTopology) g.Expect(err).ToNot(HaveOccurred()) g.Expect(actual.BootstrapObject.GetLabels()).To(HaveKeyWithValue(clusterv1.ClusterTopologyMachinePoolNameLabel, "big-pool-of-machines")) @@ -1880,7 +1911,9 @@ func TestComputeMachinePool(t *testing.T) { // missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy } - actual, err := computeMachinePool(ctx, scope, mpTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachinePool(ctx, scope, mpTopology) g.Expect(err).ToNot(HaveOccurred()) // checking only values from CC defaults @@ -1923,7 +1956,9 @@ func TestComputeMachinePool(t *testing.T) { }, } - actual, err := computeMachinePool(ctx, s, mpTopology) + e := desiredStateEngine{} + + actual, err := e.computeMachinePool(ctx, s, mpTopology) g.Expect(err).ToNot(HaveOccurred()) actualMp := actual.Object @@ -1966,7 +2001,9 @@ func TestComputeMachinePool(t *testing.T) { Name: "big-pool-of-machines", } - _, err := computeMachinePool(ctx, scope, mpTopology) + e := desiredStateEngine{} + + _, err := e.computeMachinePool(ctx, scope, mpTopology) g.Expect(err).To(HaveOccurred()) }) @@ -2076,7 +2113,10 @@ func TestComputeMachinePool(t *testing.T) { Replicas: ptr.To[int32](2), } s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) - obj, err := computeMachinePool(ctx, s, mpTopology) + + e := desiredStateEngine{} + + obj, err := e.computeMachinePool(ctx, s, mpTopology) g.Expect(err).ToNot(HaveOccurred()) g.Expect(*obj.Object.Spec.Template.Spec.Version).To(Equal(tt.expectedVersion)) }) @@ -2240,7 +2280,10 @@ func TestComputeMachineDeploymentVersion(t *testing.T) { s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...) - version := computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState) + + e := desiredStateEngine{} + + version := e.computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState) g.Expect(version).To(Equal(tt.expectedVersion)) if tt.currentMachineDeploymentState != nil { @@ -2418,7 +2461,10 @@ func TestComputeMachinePoolVersion(t *testing.T) { s.UpgradeTracker.ControlPlane.IsScaling = tt.controlPlaneScaling s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...) 
-			version := computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
+
+			e := desiredStateEngine{}
+
+			version := e.computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
 			g.Expect(version).To(Equal(tt.expectedVersion))
 
 			if tt.currentMachinePoolState != nil {
@@ -2889,7 +2935,7 @@ func Test_computeMachineHealthCheck(t *testing.T) {
 						clusterv1.ClusterTopologyOwnedLabel: "",
 					},
 					OwnerReferences: []metav1.OwnerReference{
-						*ownerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")),
+						*ownerrefs.OwnerReferenceTo(cluster, clusterv1.GroupVersion.WithKind("Cluster")),
 					},
 				},
 				Spec: clusterv1.MachineHealthCheckSpec{
diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go
index afa3b635fe77..da85708a4d69 100644
--- a/internal/controllers/topology/cluster/cluster_controller.go
+++ b/internal/controllers/topology/cluster/cluster_controller.go
@@ -40,8 +40,8 @@ import (
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
+	"sigs.k8s.io/cluster-api/exp/util/topology"
 	"sigs.k8s.io/cluster-api/feature"
-	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 	"sigs.k8s.io/cluster-api/internal/hooks"
@@ -84,8 +84,8 @@ type Reconciler struct {
 	externalTracker external.ObjectTracker
 	recorder        record.EventRecorder
 
-	// patchEngine is used to apply patches during computeDesiredState.
-	patchEngine patches.Engine
+	// desiredStateEngine is used to compute the desired state of the Cluster during computeDesiredState.
+	desiredStateEngine topology.DesiredStateEngine
 
 	patchHelperFactory structuredmerge.PatchHelperFactoryFunc
 }
@@ -125,7 +125,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 		Controller: c,
 		Cache:      mgr.GetCache(),
 	}
-	r.patchEngine = patches.NewEngine(r.RuntimeClient)
+	r.desiredStateEngine = topology.NewDesiredStateEngine(r.Client, r.Tracker, r.RuntimeClient)
 	r.recorder = mgr.GetEventRecorderFor("topology/cluster-controller")
 	if r.patchHelperFactory == nil {
 		r.patchHelperFactory = serverSideApplyPatchHelperFactory(r.Client, ssa.NewCache())
@@ -135,7 +135,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 
 // SetupForDryRun prepares the Reconciler for a dry run execution.
 func (r *Reconciler) SetupForDryRun(recorder record.EventRecorder) {
-	r.patchEngine = patches.NewEngine(r.RuntimeClient)
+	r.desiredStateEngine = topology.NewDesiredStateEngine(r.Client, r.Tracker, r.RuntimeClient)
 	r.recorder = recorder
 	r.patchHelperFactory = dryRunPatchHelperFactory(r.Client)
 }
@@ -272,7 +272,7 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope.Scope) (ctrl.Result
 	}
 
 	// Computes the desired state of the Cluster and store it in the request scope.
- s.Desired, err = r.computeDesiredState(ctx, s) + s.Desired, err = r.desiredStateEngine.ComputeDesiredState(ctx, s) if err != nil { return ctrl.Result{}, errors.Wrap(err, "error computing the desired state of the Cluster topology") } diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go index 88908d9b7646..2a525e27e7ad 100644 --- a/internal/controllers/topology/cluster/current_state_test.go +++ b/internal/controllers/topology/cluster/current_state_test.go @@ -32,6 +32,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/internal/topology/selectors" ) func TestGetCurrentState(t *testing.T) { @@ -126,7 +127,7 @@ func TestGetCurrentState(t *testing.T) { // MachineHealthChecks for the MachineDeployment and the ControlPlane. machineHealthCheckForMachineDeployment := builder.MachineHealthCheck(machineDeployment.Namespace, machineDeployment.Name). - WithSelector(*selectorForMachineDeploymentMHC(machineDeployment)). + WithSelector(*selectors.ForMachineDeploymentMHC(machineDeployment)). WithUnhealthyConditions([]clusterv1.UnhealthyCondition{ { Type: corev1.NodeReady, @@ -143,7 +144,7 @@ func TestGetCurrentState(t *testing.T) { Build() machineHealthCheckForControlPlane := builder.MachineHealthCheck(controlPlane.GetNamespace(), controlPlane.GetName()). - WithSelector(*selectorForControlPlaneMHC()). + WithSelector(*selectors.ForControlPlaneMHC()). WithUnhealthyConditions([]clusterv1.UnhealthyCondition{ { Type: corev1.NodeReady, diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index 510bd1a65b65..056b95fb3438 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -25,7 +25,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" kerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" @@ -46,6 +45,9 @@ import ( "sigs.k8s.io/cluster-api/internal/hooks" tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/internal/topology/check" + "sigs.k8s.io/cluster-api/internal/topology/clustershim" + topologynames "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/internal/topology/ownerrefs" "sigs.k8s.io/cluster-api/util" ) @@ -116,7 +118,7 @@ func (r *Reconciler) reconcileState(ctx context.Context, s *scope.Scope) error { // Reconcile the Cluster shim, a temporary object used a mean to collect objects/templates // that might be orphaned in case of errors during the remaining part of the reconcile process. func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) error { - shim := clusterShim(s.Current.Cluster) + shim := clustershim.New(s.Current.Cluster) // If we are going to create the InfrastructureCluster or the ControlPlane object, then // add a temporary cluster-shim object and use it as an additional owner. @@ -142,14 +144,14 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e // Add the shim as a temporary owner for the InfrastructureCluster. 
s.Desired.InfrastructureCluster.SetOwnerReferences( util.EnsureOwnerRef(s.Desired.InfrastructureCluster.GetOwnerReferences(), - *ownerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret")), + *ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret")), ), ) // Add the shim as a temporary owner for the ControlPlane. s.Desired.ControlPlane.Object.SetOwnerReferences( util.EnsureOwnerRef(s.Desired.ControlPlane.Object.GetOwnerReferences(), - *ownerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret")), + *ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret")), ), ) } @@ -161,10 +163,10 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e // When the Cluster and the shim object are both owners, // it's safe for us to remove the shim and garbage collect any potential orphaned resource. if s.Current.InfrastructureCluster != nil && s.Current.ControlPlane.Object != nil { - clusterOwnsAll := hasOwnerReferenceFrom(s.Current.InfrastructureCluster, s.Current.Cluster) && - hasOwnerReferenceFrom(s.Current.ControlPlane.Object, s.Current.Cluster) - shimOwnsAtLeastOne := hasOwnerReferenceFrom(s.Current.InfrastructureCluster, shim) || - hasOwnerReferenceFrom(s.Current.ControlPlane.Object, shim) + clusterOwnsAll := ownerrefs.HasOwnerReferenceFrom(s.Current.InfrastructureCluster, s.Current.Cluster) && + ownerrefs.HasOwnerReferenceFrom(s.Current.ControlPlane.Object, s.Current.Cluster) + shimOwnsAtLeastOne := ownerrefs.HasOwnerReferenceFrom(s.Current.InfrastructureCluster, shim) || + ownerrefs.HasOwnerReferenceFrom(s.Current.ControlPlane.Object, shim) if clusterOwnsAll && shimOwnsAtLeastOne { if err := r.Client.Delete(ctx, shim); err != nil { @@ -177,42 +179,6 @@ func (r *Reconciler) reconcileClusterShim(ctx context.Context, s *scope.Scope) e return nil } -func clusterShim(c *clusterv1.Cluster) *corev1.Secret { - shim := &corev1.Secret{ - TypeMeta: metav1.TypeMeta{ - Kind: "Secret", - APIVersion: corev1.SchemeGroupVersion.String(), - }, - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("%s-shim", c.Name), - Namespace: c.Namespace, - OwnerReferences: []metav1.OwnerReference{ - *ownerReferenceTo(c, clusterv1.GroupVersion.WithKind("Cluster")), - }, - }, - Type: clusterv1.ClusterSecretType, - } - return shim -} - -func hasOwnerReferenceFrom(obj, owner client.Object) bool { - for _, o := range obj.GetOwnerReferences() { - if o.Kind == owner.GetObjectKind().GroupVersionKind().Kind && o.Name == owner.GetName() { - return true - } - } - return false -} - -func getOwnerReferenceFrom(obj, owner client.Object) *metav1.OwnerReference { - for _, o := range obj.GetOwnerReferences() { - if o.Kind == owner.GetObjectKind().GroupVersionKind().Kind && o.Name == owner.GetName() { - return &o - } - } - return nil -} - func (r *Reconciler) callAfterHooks(ctx context.Context, s *scope.Scope) error { if err := r.callAfterControlPlaneInitialized(ctx, s); err != nil { return err @@ -272,7 +238,7 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope // - MachineDeployments/MachinePools are not currently upgrading // - MachineDeployments/MachinePools are not pending an upgrade // - MachineDeployments/MachinePools are not pending create - if isControlPlaneStable(s) && // Control Plane stable checks + if r.desiredStateEngine.IsControlPlaneStable(s) && // Control Plane stable checks len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade 
!s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create !s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() && // No MachineDeployments are pending an upgrade @@ -354,7 +320,7 @@ func (r *Reconciler) reconcileControlPlane(ctx context.Context, s *scope.Scope) current: s.Current.ControlPlane.InfrastructureMachineTemplate, desired: s.Desired.ControlPlane.InfrastructureMachineTemplate, compatibilityChecker: check.ObjectsAreCompatible, - templateNamePrefix: controlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name), + templateNamePrefix: topologynames.ControlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name), }) if err != nil { return false, err @@ -722,7 +688,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope ref: &desiredMD.Object.Spec.Template.Spec.InfrastructureRef, current: currentMD.InfrastructureMachineTemplate, desired: desiredMD.InfrastructureMachineTemplate, - templateNamePrefix: infrastructureMachineTemplateNamePrefix(cluster.Name, mdTopologyName), + templateNamePrefix: topologynames.InfrastructureMachineTemplateNamePrefix(cluster.Name, mdTopologyName), compatibilityChecker: check.ObjectsAreCompatible, }) if err != nil { @@ -749,7 +715,7 @@ func (r *Reconciler) updateMachineDeployment(ctx context.Context, s *scope.Scope ref: desiredMD.Object.Spec.Template.Spec.Bootstrap.ConfigRef, current: currentMD.BootstrapTemplate, desired: desiredMD.BootstrapTemplate, - templateNamePrefix: bootstrapTemplateNamePrefix(cluster.Name, mdTopologyName), + templateNamePrefix: topologynames.BootstrapTemplateNamePrefix(cluster.Name, mdTopologyName), compatibilityChecker: check.ObjectsAreInTheSameNamespace, }) if err != nil { diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index 09f45886d6bd..d271d435498f 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -44,12 +44,17 @@ import ( runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" + "sigs.k8s.io/cluster-api/internal/topology/clustershim" + "sigs.k8s.io/cluster-api/internal/topology/names" + "sigs.k8s.io/cluster-api/internal/topology/ownerrefs" + "sigs.k8s.io/cluster-api/internal/topology/selectors" "sigs.k8s.io/cluster-api/internal/util/ssa" "sigs.k8s.io/cluster-api/internal/webhooks" ) @@ -77,7 +82,7 @@ func TestReconcileShim(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) cluster1 := cluster.DeepCopy() cluster1.SetNamespace(namespace.GetName()) - cluster1Shim := clusterShim(cluster1) + cluster1Shim := clustershim.New(cluster1) // Create a scope with a cluster and InfrastructureCluster yet to be created. 
s := scope.New(cluster1) @@ -117,7 +122,7 @@ func TestReconcileShim(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) cluster1 := cluster.DeepCopy() cluster1.SetNamespace(namespace.GetName()) - cluster1Shim := clusterShim(cluster1) + cluster1Shim := clustershim.New(cluster1) // Create a scope with a cluster and InfrastructureCluster yet to be created. s := scope.New(cluster1) @@ -161,7 +166,7 @@ func TestReconcileShim(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) cluster1 := cluster.DeepCopy() cluster1.SetNamespace(namespace.GetName()) - cluster1Shim := clusterShim(cluster1) + cluster1Shim := clustershim.New(cluster1) // Create a scope with a cluster and InfrastructureCluster created but not yet reconciled. s := scope.New(cluster1) @@ -172,10 +177,10 @@ func TestReconcileShim(t *testing.T) { // Add the shim as a temporary owner for the InfrastructureCluster and ControlPlane. ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences() - ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret"))) + ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret"))) s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs) ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences() - ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret"))) + ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret"))) s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs) // Pre-create a shim @@ -205,7 +210,7 @@ func TestReconcileShim(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) cluster1 := cluster.DeepCopy() cluster1.SetNamespace(namespace.GetName()) - cluster1Shim := clusterShim(cluster1) + cluster1Shim := clustershim.New(cluster1) // Create a scope with a cluster and InfrastructureCluster created and reconciled. s := scope.New(cluster1) @@ -219,14 +224,14 @@ func TestReconcileShim(t *testing.T) { ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences() ownerRefs = append( ownerRefs, - *ownerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")), - *ownerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) + *ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")), + *ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs) ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences() ownerRefs = append( ownerRefs, - *ownerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")), - *ownerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) + *ownerrefs.OwnerReferenceTo(cluster1Shim, corev1.SchemeGroupVersion.WithKind("Secret")), + *ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs) // Pre-create a shim @@ -256,7 +261,7 @@ func TestReconcileShim(t *testing.T) { g.Expect(err).ToNot(HaveOccurred()) cluster1 := cluster.DeepCopy() cluster1.SetNamespace(namespace.GetName()) - cluster1Shim := clusterShim(cluster1) + cluster1Shim := clustershim.New(cluster1) // Create a scope with a cluster and InfrastructureCluster created and reconciled. 
s := scope.New(cluster1) @@ -267,10 +272,10 @@ func TestReconcileShim(t *testing.T) { // Add the cluster as a final owner for the InfrastructureCluster and ControlPlane (reconciled). ownerRefs := s.Current.InfrastructureCluster.GetOwnerReferences() - ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) + ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) s.Current.InfrastructureCluster.SetOwnerReferences(ownerRefs) ownerRefs = s.Current.ControlPlane.Object.GetOwnerReferences() - ownerRefs = append(ownerRefs, *ownerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) + ownerRefs = append(ownerRefs, *ownerrefs.OwnerReferenceTo(cluster1, clusterv1.GroupVersion.WithKind("Cluster"))) s.Current.ControlPlane.Object.SetOwnerReferences(ownerRefs) // Run reconcileClusterShim using a nil client, so an error will be triggered if any operation is attempted @@ -1075,9 +1080,10 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build() r := &Reconciler{ - Client: fakeClient, - APIReader: fakeClient, - RuntimeClient: fakeRuntimeClient, + Client: fakeClient, + APIReader: fakeClient, + RuntimeClient: fakeRuntimeClient, + desiredStateEngine: topology.NewDesiredStateEngine(fakeClient, nil, fakeRuntimeClient), } err := r.callAfterClusterUpgrade(ctx, tt.s) @@ -1615,7 +1621,7 @@ func TestReconcileControlPlane(t *testing.T) { // This check is just for the naming format uses by generated templates - here it's templateName-* // This check is only performed when we had an initial template that has been changed if gotRotation { - pattern := fmt.Sprintf("%s.*", controlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name)) + pattern := fmt.Sprintf("%s.*", names.ControlPlaneInfrastructureMachineTemplateNamePrefix(s.Current.Cluster.Name)) ok, err := regexp.Match(pattern, []byte(gotInfrastructureMachineRef.Name)) g.Expect(err).ToNot(HaveOccurred()) g.Expect(ok).To(BeTrue()) @@ -1750,7 +1756,7 @@ func TestReconcileControlPlaneMachineHealthCheck(t *testing.T) { Build() mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "cp1"). - WithSelector(*selectorForControlPlaneMHC()). + WithSelector(*selectors.ForControlPlaneMHC()). WithUnhealthyConditions(mhcClass.UnhealthyConditions). WithClusterName("cluster1") @@ -3295,7 +3301,7 @@ func TestReconcileMachineDeploymentMachineHealthCheck(t *testing.T) { maxUnhealthy := intstr.Parse("45%") mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "md-1"). - WithSelector(*selectorForMachineDeploymentMHC(md)). + WithSelector(*selectors.ForMachineDeploymentMHC(md)). WithUnhealthyConditions([]clusterv1.UnhealthyCondition{ { Type: corev1.NodeReady, @@ -3693,7 +3699,7 @@ func TestReconciler_reconcileMachineHealthCheck(t *testing.T) { // create a controlPlane object with enough information to be used as an OwnerReference for the MachineHealthCheck. cp := builder.ControlPlane(metav1.NamespaceDefault, "cp1").Build() mhcBuilder := builder.MachineHealthCheck(metav1.NamespaceDefault, "cp1"). - WithSelector(*selectorForControlPlaneMHC()). + WithSelector(*selectors.ForControlPlaneMHC()). 
WithUnhealthyConditions([]clusterv1.UnhealthyCondition{ { Type: corev1.NodeReady, diff --git a/internal/controllers/topology/cluster/util.go b/internal/controllers/topology/cluster/util.go index 5fa6711c1393..ada48570c919 100644 --- a/internal/controllers/topology/cluster/util.go +++ b/internal/controllers/topology/cluster/util.go @@ -18,7 +18,6 @@ package cluster import ( "context" - "fmt" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -27,31 +26,6 @@ import ( "sigs.k8s.io/cluster-api/controllers/external" ) -// bootstrapTemplateNamePrefix calculates the name prefix for a BootstrapTemplate. -func bootstrapTemplateNamePrefix(clusterName, machineDeploymentTopologyName string) string { - return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName) -} - -// infrastructureMachineTemplateNamePrefix calculates the name prefix for a InfrastructureMachineTemplate. -func infrastructureMachineTemplateNamePrefix(clusterName, machineDeploymentTopologyName string) string { - return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName) -} - -// bootstrapConfigNamePrefix calculates the name prefix for a BootstrapConfig. -func bootstrapConfigNamePrefix(clusterName, machinePoolTopologyName string) string { - return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName) -} - -// infrastructureMachinePoolNamePrefix calculates the name prefix for a InfrastructureMachinePool. -func infrastructureMachinePoolNamePrefix(clusterName, machinePoolTopologyName string) string { - return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName) -} - -// infrastructureMachineTemplateNamePrefix calculates the name prefix for a InfrastructureMachineTemplate. -func controlPlaneInfrastructureMachineTemplateNamePrefix(clusterName string) string { - return fmt.Sprintf("%s-", clusterName) -} - // getReference gets the object referenced in ref. func (r *Reconciler) getReference(ctx context.Context, ref *corev1.ObjectReference) (*unstructured.Unstructured, error) { if ref == nil { diff --git a/internal/topology/clustershim/clustershim.go b/internal/topology/clustershim/clustershim.go new file mode 100644 index 000000000000..630192a979f7 --- /dev/null +++ b/internal/topology/clustershim/clustershim.go @@ -0,0 +1,47 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package clustershim contains clustershim utils. +package clustershim + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/internal/topology/ownerrefs" +) + +// New creates a new clustershim. 
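+// The shim is a Secret of type clusterv1.ClusterSecretType, named "<cluster-name>-shim" and owned by the Cluster;
+// it is used as a temporary owner for the InfrastructureCluster/ControlPlane objects so they can be garbage
+// collected if an error interrupts the reconcile before the Cluster itself owns them.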
+func New(c *clusterv1.Cluster) *corev1.Secret {
+	shim := &corev1.Secret{
+		TypeMeta: metav1.TypeMeta{
+			Kind:       "Secret",
+			APIVersion: corev1.SchemeGroupVersion.String(),
+		},
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      fmt.Sprintf("%s-shim", c.Name),
+			Namespace: c.Namespace,
+			OwnerReferences: []metav1.OwnerReference{
+				*ownerrefs.OwnerReferenceTo(c, clusterv1.GroupVersion.WithKind("Cluster")),
+			},
+		},
+		Type: clusterv1.ClusterSecretType,
+	}
+	return shim
+}
diff --git a/internal/topology/names/names.go b/internal/topology/names/names.go
index d4caf29ae742..33e419c3584a 100644
--- a/internal/topology/names/names.go
+++ b/internal/topology/names/names.go
@@ -126,3 +126,28 @@ func (g *templateGenerator) GenerateName() (string, error) {
 
 	return name, nil
 }
+
+// BootstrapTemplateNamePrefix calculates the name prefix for a BootstrapTemplate.
+func BootstrapTemplateNamePrefix(clusterName, machineDeploymentTopologyName string) string {
+	return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName)
+}
+
+// InfrastructureMachineTemplateNamePrefix calculates the name prefix for an InfrastructureMachineTemplate.
+func InfrastructureMachineTemplateNamePrefix(clusterName, machineDeploymentTopologyName string) string {
+	return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName)
+}
+
+// BootstrapConfigNamePrefix calculates the name prefix for a BootstrapConfig.
+func BootstrapConfigNamePrefix(clusterName, machinePoolTopologyName string) string {
+	return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName)
+}
+
+// InfrastructureMachinePoolNamePrefix calculates the name prefix for an InfrastructureMachinePool.
+func InfrastructureMachinePoolNamePrefix(clusterName, machinePoolTopologyName string) string {
+	return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName)
+}
+
+// ControlPlaneInfrastructureMachineTemplateNamePrefix calculates the name prefix for the control plane's InfrastructureMachineTemplate.
+func ControlPlaneInfrastructureMachineTemplateNamePrefix(clusterName string) string {
+	return fmt.Sprintf("%s-", clusterName)
+}
diff --git a/internal/topology/ownerrefs/ownerref.go b/internal/topology/ownerrefs/ownerref.go
new file mode 100644
index 000000000000..a610c455bd72
--- /dev/null
+++ b/internal/topology/ownerrefs/ownerref.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2023 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package ownerrefs contains ownerref utils.
+package ownerrefs
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// OwnerReferenceTo converts an object to an OwnerReference.
+// Note: We pass in gvk explicitly as we can't rely on GVK being set on all objects
+// (only on Unstructured).
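+// The returned reference carries APIVersion, Kind, Name and UID; Controller and BlockOwnerDeletion are left unset.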
+func OwnerReferenceTo(obj client.Object, gvk schema.GroupVersionKind) *metav1.OwnerReference { + return &metav1.OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: obj.GetName(), + UID: obj.GetUID(), + } +} + +// HasOwnerReferenceFrom checks if obj has an ownerRef pointing to owner. +func HasOwnerReferenceFrom(obj, owner client.Object) bool { + for _, o := range obj.GetOwnerReferences() { + if o.Kind == owner.GetObjectKind().GroupVersionKind().Kind && o.Name == owner.GetName() { + return true + } + } + return false +} diff --git a/internal/topology/selectors/selectors.go b/internal/topology/selectors/selectors.go new file mode 100644 index 000000000000..08a869693c73 --- /dev/null +++ b/internal/topology/selectors/selectors.go @@ -0,0 +1,47 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package selectors contains selectors utils. +package selectors + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +// ForMachineDeploymentMHC generates a selector for MachineDeployment MHCs. +func ForMachineDeploymentMHC(md *clusterv1.MachineDeployment) *metav1.LabelSelector { + // The selector returned here is the minimal common selector for all MachineSets belonging to a MachineDeployment. + // It does not include any labels set in ClusterClass, Cluster Topology or elsewhere. + return &metav1.LabelSelector{MatchLabels: map[string]string{ + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.ClusterTopologyMachineDeploymentNameLabel: md.Spec.Selector.MatchLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel], + }, + } +} + +// ForControlPlaneMHC generates a selector for control plane MHCs. +func ForControlPlaneMHC() *metav1.LabelSelector { + // The selector returned here is the minimal common selector for all Machines belonging to the ControlPlane. + // It does not include any labels set in ClusterClass, Cluster Topology or elsewhere. 
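+	// clusterv1.MachineControlPlaneLabel identifies Machines that belong to a control plane.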
+ return &metav1.LabelSelector{ + MatchLabels: map[string]string{ + clusterv1.ClusterTopologyOwnedLabel: "", + clusterv1.MachineControlPlaneLabel: "", + }, + } +} From 7528e3d26e457510865a1ef7dd73e7447a3a8875 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Wed, 27 Mar 2024 16:38:36 +0100 Subject: [PATCH 05/10] Export scope for desired state computation --- exp/util/topology/desired_state.go | 2 +- exp/util/topology/desired_state_test.go | 2 +- .../topology/cluster => exp/util/topology}/scope/blueprint.go | 0 .../cluster => exp/util/topology}/scope/blueprint_test.go | 0 .../topology/cluster => exp/util/topology}/scope/doc.go | 0 .../cluster => exp/util/topology}/scope/hookresponsetracker.go | 0 .../util/topology}/scope/hookresponsetracker_test.go | 0 .../topology/cluster => exp/util/topology}/scope/scope.go | 0 .../topology/cluster => exp/util/topology}/scope/scope_test.go | 0 .../topology/cluster => exp/util/topology}/scope/state.go | 0 .../topology/cluster => exp/util/topology}/scope/state_test.go | 0 .../cluster => exp/util/topology}/scope/upgradetracker.go | 0 .../cluster => exp/util/topology}/scope/upgradetracker_test.go | 0 internal/controllers/topology/cluster/blueprint.go | 2 +- internal/controllers/topology/cluster/blueprint_test.go | 2 +- internal/controllers/topology/cluster/cluster_controller.go | 2 +- .../controllers/topology/cluster/cluster_controller_test.go | 2 +- internal/controllers/topology/cluster/conditions.go | 2 +- internal/controllers/topology/cluster/conditions_test.go | 2 +- internal/controllers/topology/cluster/current_state.go | 2 +- internal/controllers/topology/cluster/current_state_test.go | 2 +- internal/controllers/topology/cluster/patches/engine.go | 2 +- internal/controllers/topology/cluster/patches/engine_test.go | 2 +- internal/controllers/topology/cluster/reconcile_state.go | 2 +- internal/controllers/topology/cluster/reconcile_state_test.go | 2 +- 25 files changed, 14 insertions(+), 14 deletions(-) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/blueprint.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/blueprint_test.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/doc.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/hookresponsetracker.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/hookresponsetracker_test.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/scope.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/scope_test.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/state.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/state_test.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/upgradetracker.go (100%) rename {internal/controllers/topology/cluster => exp/util/topology}/scope/upgradetracker_test.go (100%) diff --git a/exp/util/topology/desired_state.go b/exp/util/topology/desired_state.go index 1aad9497d085..5f57fd6da154 100644 --- a/exp/util/topology/desired_state.go +++ b/exp/util/topology/desired_state.go @@ -35,10 +35,10 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" 
"sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/hooks" tlog "sigs.k8s.io/cluster-api/internal/log" runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client" diff --git a/exp/util/topology/desired_state_test.go b/exp/util/topology/desired_state_test.go index f3e600d265fd..89b140e11af5 100644 --- a/exp/util/topology/desired_state_test.go +++ b/exp/util/topology/desired_state_test.go @@ -40,9 +40,9 @@ import ( runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" diff --git a/internal/controllers/topology/cluster/scope/blueprint.go b/exp/util/topology/scope/blueprint.go similarity index 100% rename from internal/controllers/topology/cluster/scope/blueprint.go rename to exp/util/topology/scope/blueprint.go diff --git a/internal/controllers/topology/cluster/scope/blueprint_test.go b/exp/util/topology/scope/blueprint_test.go similarity index 100% rename from internal/controllers/topology/cluster/scope/blueprint_test.go rename to exp/util/topology/scope/blueprint_test.go diff --git a/internal/controllers/topology/cluster/scope/doc.go b/exp/util/topology/scope/doc.go similarity index 100% rename from internal/controllers/topology/cluster/scope/doc.go rename to exp/util/topology/scope/doc.go diff --git a/internal/controllers/topology/cluster/scope/hookresponsetracker.go b/exp/util/topology/scope/hookresponsetracker.go similarity index 100% rename from internal/controllers/topology/cluster/scope/hookresponsetracker.go rename to exp/util/topology/scope/hookresponsetracker.go diff --git a/internal/controllers/topology/cluster/scope/hookresponsetracker_test.go b/exp/util/topology/scope/hookresponsetracker_test.go similarity index 100% rename from internal/controllers/topology/cluster/scope/hookresponsetracker_test.go rename to exp/util/topology/scope/hookresponsetracker_test.go diff --git a/internal/controllers/topology/cluster/scope/scope.go b/exp/util/topology/scope/scope.go similarity index 100% rename from internal/controllers/topology/cluster/scope/scope.go rename to exp/util/topology/scope/scope.go diff --git a/internal/controllers/topology/cluster/scope/scope_test.go b/exp/util/topology/scope/scope_test.go similarity index 100% rename from internal/controllers/topology/cluster/scope/scope_test.go rename to exp/util/topology/scope/scope_test.go diff --git a/internal/controllers/topology/cluster/scope/state.go b/exp/util/topology/scope/state.go similarity index 100% rename from internal/controllers/topology/cluster/scope/state.go rename to exp/util/topology/scope/state.go diff --git a/internal/controllers/topology/cluster/scope/state_test.go b/exp/util/topology/scope/state_test.go similarity index 100% rename from internal/controllers/topology/cluster/scope/state_test.go rename to exp/util/topology/scope/state_test.go diff --git a/internal/controllers/topology/cluster/scope/upgradetracker.go 
b/exp/util/topology/scope/upgradetracker.go similarity index 100% rename from internal/controllers/topology/cluster/scope/upgradetracker.go rename to exp/util/topology/scope/upgradetracker.go diff --git a/internal/controllers/topology/cluster/scope/upgradetracker_test.go b/exp/util/topology/scope/upgradetracker_test.go similarity index 100% rename from internal/controllers/topology/cluster/scope/upgradetracker_test.go rename to exp/util/topology/scope/upgradetracker_test.go diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go index b57d5fae2475..a30b48fb2881 100644 --- a/internal/controllers/topology/cluster/blueprint.go +++ b/internal/controllers/topology/cluster/blueprint.go @@ -22,7 +22,7 @@ import ( "github.com/pkg/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" tlog "sigs.k8s.io/cluster-api/internal/log" ) diff --git a/internal/controllers/topology/cluster/blueprint_test.go b/internal/controllers/topology/cluster/blueprint_test.go index 886a5dcc3f7c..73bff3407605 100644 --- a/internal/controllers/topology/cluster/blueprint_test.go +++ b/internal/controllers/topology/cluster/blueprint_test.go @@ -28,7 +28,7 @@ import ( . "sigs.k8s.io/controller-runtime/pkg/envtest/komega" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/internal/test/builder" ) diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go index da85708a4d69..55f4c1103436 100644 --- a/internal/controllers/topology/cluster/cluster_controller.go +++ b/internal/controllers/topology/cluster/cluster_controller.go @@ -41,8 +41,8 @@ import ( runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/exp/util/topology" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" "sigs.k8s.io/cluster-api/internal/hooks" tlog "sigs.k8s.io/cluster-api/internal/log" diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go index b1be769fa553..2239d5bea2fc 100644 --- a/internal/controllers/topology/cluster/cluster_controller_test.go +++ b/internal/controllers/topology/cluster/cluster_controller_test.go @@ -40,9 +40,9 @@ import ( runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go index 1269e490839e..67d50d557f33 100644 --- 
a/internal/controllers/topology/cluster/conditions.go +++ b/internal/controllers/topology/cluster/conditions.go @@ -23,8 +23,8 @@ import ( "github.com/pkg/errors" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go index 29e0c41e66a9..2c211a511e2a 100644 --- a/internal/controllers/topology/cluster/conditions_test.go +++ b/internal/controllers/topology/cluster/conditions_test.go @@ -30,7 +30,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go index ecd52ef4ef96..6f7d1d0ab785 100644 --- a/internal/controllers/topology/cluster/current_state.go +++ b/internal/controllers/topology/cluster/current_state.go @@ -29,8 +29,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/util/labels" ) diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go index 2a525e27e7ad..68e6cbcc478a 100644 --- a/internal/controllers/topology/cluster/current_state_test.go +++ b/internal/controllers/topology/cluster/current_state_test.go @@ -30,7 +30,7 @@ import ( . 
"sigs.k8s.io/controller-runtime/pkg/envtest/komega" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/internal/topology/selectors" ) diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index da55a7a03689..08c513f17882 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -31,13 +31,13 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/api" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/external" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/inline" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/variables" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" tlog "sigs.k8s.io/cluster-api/internal/log" runtimeclient "sigs.k8s.io/cluster-api/internal/runtime/client" ) diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go index 23b2843afafc..823c90f95ed9 100644 --- a/internal/controllers/topology/cluster/patches/engine_test.go +++ b/internal/controllers/topology/cluster/patches/engine_test.go @@ -35,8 +35,8 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" ) diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index 056b95fb3438..d3f161208176 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -38,9 +38,9 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" "sigs.k8s.io/cluster-api/internal/hooks" tlog "sigs.k8s.io/cluster-api/internal/log" diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index d271d435498f..edb8137d7e38 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -45,8 +45,8 @@ import ( runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 
"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" "sigs.k8s.io/cluster-api/exp/util/topology" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" "sigs.k8s.io/cluster-api/internal/contract" - "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/scope" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" "sigs.k8s.io/cluster-api/internal/hooks" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" From f6012ba55cfc38dae4fad64eb4c801c02702d441 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Wed, 27 Mar 2024 17:24:33 +0100 Subject: [PATCH 06/10] Add integration test to test extension --- .../handler_integration_test.go | 486 ++++++++++++++++++ .../clusterclass-quick-start-runtimesdk.yaml | 148 ++++++ test/go.mod | 20 + test/go.sum | 94 ++++ 4 files changed, 748 insertions(+) create mode 100644 test/extension/handlers/topologymutation/handler_integration_test.go create mode 100644 test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go new file mode 100644 index 000000000000..5fd965a69379 --- /dev/null +++ b/test/extension/handlers/topologymutation/handler_integration_test.go @@ -0,0 +1,486 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package topologymutation + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "io" + "os" + "testing" + + . 
"github.com/onsi/gomega" + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/sets" + apiyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/client-go/rest" + utilfeature "k8s.io/component-base/featuregate/testing" + "k8s.io/utils/ptr" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/yaml" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + "sigs.k8s.io/cluster-api/controllers" + runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" + runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" + runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" + "sigs.k8s.io/cluster-api/exp/util/topology" + "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/feature" + "sigs.k8s.io/cluster-api/util/contract" + "sigs.k8s.io/cluster-api/webhooks" +) + +var ( + ctx = ctrl.SetupSignalHandler() +) + +func TestHandler(t *testing.T) { + g := NewWithT(t) + + // Enable RuntimeSDK for this test so we can use RuntimeExtensions. + defer utilfeature.SetFeatureGateDuringTest(t, feature.Gates, feature.RuntimeSDK, true)() + + // Get a scope based on the Cluster and ClusterClass. + cluster := getCluster() + clusterVariableImageRepository := "kindest" + cluster.Spec.Topology.Variables = []clusterv1.ClusterVariable{ + { + DefinitionFrom: "test-patch", + Name: "imageRepository", + Value: apiextensionsv1.JSON{Raw: []byte("\"" + clusterVariableImageRepository + "\"")}, + }, + } + clusterClassFile := "./testdata/clusterclass-quick-start-runtimesdk.yaml" + s, err := getScope(cluster, clusterClassFile) + g.Expect(err).ToNot(HaveOccurred()) + + // Create a RuntimeClient that is backed by our Runtime Extension. + runtimeClient := &injectRuntimeClient{ + runtimeExtension: NewExtensionHandlers(testScheme), + } + + // Create a ClusterClassReconciler. + fakeClient, mgr, err := createClusterClassFakeClientAndManager(s.Blueprint) + g.Expect(err).ToNot(HaveOccurred()) + clusterClassReconciler := controllers.ClusterClassReconciler{ + Client: fakeClient, + UnstructuredCachingClient: fakeClient, + RuntimeClient: runtimeClient, + } + err = clusterClassReconciler.SetupWithManager(ctx, mgr, controller.Options{}) + g.Expect(err).ToNot(HaveOccurred()) + + // Create a DesiredStateEngine. + desiredStateEngine := topology.NewDesiredStateEngine(nil, nil, runtimeClient) + + // Note: as of today we don't have to set any fields and also don't have to call + // SetupWebhookWithManager because DefaultAndValidateVariables doesn't need any of that. + clusterWebhook := webhooks.Cluster{} + + // Reconcile ClusterClass. + // Note: this also reconciles variables from inline and external variables into ClusterClass.status. + _, err = clusterClassReconciler.Reconcile(ctx, ctrl.Request{ + NamespacedName: types.NamespacedName{ + Namespace: s.Blueprint.ClusterClass.Namespace, + Name: s.Blueprint.ClusterClass.Name, + }}) + g.Expect(err).ToNot(HaveOccurred()) + // Overwrite ClusterClass in blueprint with the reconciled ClusterClass. 
+ err = clusterClassReconciler.Client.Get(ctx, client.ObjectKeyFromObject(s.Blueprint.ClusterClass), s.Blueprint.ClusterClass) + g.Expect(err).ToNot(HaveOccurred()) + + // Run variable defaulting and validation on the Cluster object. + errs := clusterWebhook.DefaultAndValidateVariables(s.Current.Cluster, s.Blueprint.ClusterClass) + g.Expect(errs.ToAggregate()).ToNot(HaveOccurred()) + + // Return the desired state. + desiredState, err := desiredStateEngine.ComputeDesiredState(ctx, s) + g.Expect(err).ToNot(HaveOccurred()) + + dockerClusterImageRepository, found, err := unstructured.NestedString(desiredState.InfrastructureCluster.Object, "spec", "loadBalancer", "imageRepository") + g.Expect(err).ToNot(HaveOccurred()) + g.Expect(found).To(BeTrue()) + g.Expect(dockerClusterImageRepository).To(Equal(clusterVariableImageRepository)) +} + +func getCluster() *clusterv1.Cluster { + return &clusterv1.Cluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: "cluster-name-test", + Namespace: "namespace-name-test", + }, + Spec: clusterv1.ClusterSpec{ + ClusterNetwork: &clusterv1.ClusterNetwork{ + Pods: &clusterv1.NetworkRanges{ + CIDRBlocks: []string{"192.168.0.0/16"}, + }, + }, + Topology: &clusterv1.Topology{ + Version: "v1.29.0", + // NOTE: Class name must match the ClusterClass name. + Class: "quick-start-runtimesdk", + ControlPlane: clusterv1.ControlPlaneTopology{ + Replicas: ptr.To[int32](1), + }, + Workers: &clusterv1.WorkersTopology{ + MachineDeployments: []clusterv1.MachineDeploymentTopology{ + { + Name: "md-test1", + // NOTE: MachineDeploymentClass name must match what is defined in ClusterClass packages. + Class: "default-worker", + Replicas: ptr.To[int32](1), + }, + }, + }, + }, + }, + } +} + +func createClusterClassFakeClientAndManager(blueprint *scope.ClusterBlueprint) (client.Client, manager.Manager, error) { + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + _ = apiextensionsv1.AddToScheme(scheme) + objs := []client.Object{ + blueprint.ClusterClass, + } + + unstructuredObjs := []*unstructured.Unstructured{ + blueprint.InfrastructureClusterTemplate, + blueprint.ControlPlane.Template, + blueprint.ControlPlane.InfrastructureMachineTemplate, + } + for _, md := range blueprint.MachineDeployments { + unstructuredObjs = append(unstructuredObjs, md.InfrastructureMachineTemplate, md.BootstrapTemplate) + } + for _, mp := range blueprint.MachinePools { + unstructuredObjs = append(unstructuredObjs, mp.InfrastructureMachinePoolTemplate, mp.BootstrapTemplate) + } + + objAlreadyAdded := sets.Set[string]{} + crdAlreadyAdded := sets.Set[string]{} + for _, unstructuredObj := range unstructuredObjs { + if !objAlreadyAdded.Has(unstructuredObj.GroupVersionKind().Kind + "/" + unstructuredObj.GetName()) { + objs = append(objs, unstructuredObj) + objAlreadyAdded.Insert(unstructuredObj.GroupVersionKind().Kind + "/" + unstructuredObj.GetName()) + } + + crd := generateCRDForUnstructured(unstructuredObj) + if !crdAlreadyAdded.Has(crd.Name) { + objs = append(objs, crd) + crdAlreadyAdded.Insert(crd.Name) + } + } + + fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(objs...).WithStatusSubresource(&clusterv1.ClusterClass{}).Build() + + mgr, err := ctrl.NewManager(&rest.Config{}, ctrl.Options{Scheme: scheme}) + if err != nil { + return nil, nil, err + } + + return fakeClient, mgr, err +} + +func generateCRDForUnstructured(u *unstructured.Unstructured) *apiextensionsv1.CustomResourceDefinition { + gvk := u.GroupVersionKind() + return &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: 
metav1.ObjectMeta{
+			Name: contract.CalculateCRDName(gvk.Group, gvk.Kind),
+			Labels: map[string]string{
+				clusterv1.GroupVersion.String(): gvk.Version,
+			},
+		},
+	}
+}
+
+// getScope gets the blueprint (ClusterClass) and current state based on cluster and clusterClassFile.
+// Note: MachinePool blueprints are mapped below, but the test Cluster (see getCluster) does not
+// define any MachinePool topologies, so MachinePools are not exercised end to end.
+func getScope(cluster *clusterv1.Cluster, clusterClassFile string) (*scope.Scope, error) {
+	clusterClassYAML, err := os.ReadFile(clusterClassFile) //nolint:gosec // reading a file in tests is not a security issue.
+	if err != nil {
+		return nil, err
+	}
+
+	// Get all objects by groupVersionKindName.
+	parsedObjects, err := parseObjects(clusterClassYAML)
+	if err != nil {
+		return nil, err
+	}
+
+	s := scope.New(cluster)
+	s.Current.ControlPlane = &scope.ControlPlaneState{}
+	s.Blueprint = &scope.ClusterBlueprint{
+		Topology:           cluster.Spec.Topology,
+		ControlPlane:       &scope.ControlPlaneBlueprint{},
+		MachineDeployments: map[string]*scope.MachineDeploymentBlueprint{},
+		MachinePools:       map[string]*scope.MachinePoolBlueprint{},
+	}
+
+	// Get ClusterClass and referenced templates.
+	// ClusterClass
+	s.Blueprint.ClusterClass = mustFind(findObject[*clusterv1.ClusterClass](parsedObjects, groupVersionKindName{
+		Kind: "ClusterClass",
+	}))
+	// InfrastructureClusterTemplate
+	s.Blueprint.InfrastructureClusterTemplate = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(s.Blueprint.ClusterClass.Spec.Infrastructure.Ref)))
+
+	// ControlPlane
+	s.Blueprint.ControlPlane.Template = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(s.Blueprint.ClusterClass.Spec.ControlPlane.Ref)))
+	if s.Blueprint.HasControlPlaneInfrastructureMachine() {
+		s.Blueprint.ControlPlane.InfrastructureMachineTemplate = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(s.Blueprint.ClusterClass.Spec.ControlPlane.MachineInfrastructure.Ref)))
+	}
+	if s.Blueprint.HasControlPlaneMachineHealthCheck() {
+		s.Blueprint.ControlPlane.MachineHealthCheck = s.Blueprint.ClusterClass.Spec.ControlPlane.MachineHealthCheck
+	}
+
+	// MachineDeployments.
+	for _, machineDeploymentClass := range s.Blueprint.ClusterClass.Spec.Workers.MachineDeployments {
+		machineDeploymentBlueprint := &scope.MachineDeploymentBlueprint{}
+		machineDeploymentClass.Template.Metadata.DeepCopyInto(&machineDeploymentBlueprint.Metadata)
+		machineDeploymentBlueprint.InfrastructureMachineTemplate = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(machineDeploymentClass.Template.Infrastructure.Ref)))
+		machineDeploymentBlueprint.BootstrapTemplate = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(machineDeploymentClass.Template.Bootstrap.Ref)))
+		if machineDeploymentClass.MachineHealthCheck != nil {
+			machineDeploymentBlueprint.MachineHealthCheck = machineDeploymentClass.MachineHealthCheck
+		}
+		s.Blueprint.MachineDeployments[machineDeploymentClass.Class] = machineDeploymentBlueprint
+	}
+
+	// MachinePools.
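+	// Note: the loop below mirrors the MachineDeployment mapping above, except that there is
+	// no MachineHealthCheck to copy over for MachinePools.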
+ for _, machinePoolClass := range s.Blueprint.ClusterClass.Spec.Workers.MachinePools { + machinePoolBlueprint := &scope.MachinePoolBlueprint{} + machinePoolClass.Template.Metadata.DeepCopyInto(&machinePoolBlueprint.Metadata) + machinePoolBlueprint.InfrastructureMachinePoolTemplate = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(machinePoolClass.Template.Infrastructure.Ref))) + machinePoolBlueprint.BootstrapTemplate = mustFind(findObject[*unstructured.Unstructured](parsedObjects, refToGroupVersionKindName(machinePoolClass.Template.Bootstrap.Ref))) + s.Blueprint.MachinePools[machinePoolClass.Class] = machinePoolBlueprint + } + + return s, nil +} + +type groupVersionKindName struct { + Name string + APIVersion string + Kind string +} + +// parseObjects parses objects in clusterClassYAML and returns them by groupVersionKindName. +func parseObjects(clusterClassYAML []byte) (map[groupVersionKindName]runtime.Object, error) { + // Only adding clusterv1 as we want to parse everything else as Unstructured, + // because everything else is stored as Unstructured in Scope. + scheme := runtime.NewScheme() + _ = clusterv1.AddToScheme(scheme) + universalDeserializer := serializer.NewCodecFactory(scheme).UniversalDeserializer() + + parsedObjects := map[groupVersionKindName]runtime.Object{} + // Inspired by cluster-api/util/yaml.ToUnstructured + reader := apiyaml.NewYAMLReader(bufio.NewReader(bytes.NewReader(clusterClassYAML))) + for { + // Read one YAML document at a time, until io.EOF is returned + objectBytes, err := reader.Read() + if err != nil { + if errors.Is(err, io.EOF) { + break + } + return nil, errors.Wrapf(err, "failed to read YAML") + } + if len(objectBytes) == 0 { + break + } + + obj, gvk, err := universalDeserializer.Decode(objectBytes, nil, nil) + // Unmarshal to Unstructured if the type of the object is not registered. + if runtime.IsNotRegisteredError(err) { + u := &unstructured.Unstructured{} + if err := yaml.Unmarshal(objectBytes, u); err != nil { + return nil, err + } + parsedObjects[groupVersionKindName{ + Name: u.GetName(), + APIVersion: u.GroupVersionKind().GroupVersion().String(), + Kind: u.GroupVersionKind().Kind, + }] = u + continue + } + // Return if we got another error + if err != nil { + return nil, err + } + + // Add the unmarshalled typed object. + metaObj, ok := obj.(metav1.Object) + if !ok { + return nil, errors.Errorf("found an object which is not a metav1.Object") + } + parsedObjects[groupVersionKindName{ + Name: metaObj.GetName(), + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + }] = obj + } + return parsedObjects, nil +} + +func mustFind[K runtime.Object](obj K, err error) K { + if err != nil { + panic(err) + } + return obj +} + +// findObject looks up an object with the given groupVersionKindName in the given objects map. 
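+// Empty fields in the query act as wildcards; an error is returned if multiple objects match
+// or if the matched object cannot be type-asserted to K. For example, the ClusterClass lookup
+// in getScope above (variable names here are illustrative):
+//
+//	cc, err := findObject[*clusterv1.ClusterClass](parsedObjects, groupVersionKindName{Kind: "ClusterClass"})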
+func findObject[K runtime.Object](objects map[groupVersionKindName]runtime.Object, groupVersionKindName groupVersionKindName) (K, error) {
+	var res K
+	var alreadyFound bool
+	for gvkn, obj := range objects {
+		if groupVersionKindName.Name != "" && groupVersionKindName.Name != gvkn.Name {
+			continue
+		}
+		if groupVersionKindName.APIVersion != "" && groupVersionKindName.APIVersion != gvkn.APIVersion {
+			continue
+		}
+		if groupVersionKindName.Kind != "" && groupVersionKindName.Kind != gvkn.Kind {
+			continue
+		}
+
+		if alreadyFound {
+			return res, errors.Errorf("found multiple objects matching %v", groupVersionKindName)
+		}
+
+		objK, ok := obj.(K)
+		if !ok {
+			return res, errors.Errorf("found an object matching %v, but it has the wrong type", groupVersionKindName)
+		}
+		res = objK
+		alreadyFound = true
+	}
+
+	return res, nil
+}
+
+func refToGroupVersionKindName(ref *corev1.ObjectReference) groupVersionKindName {
+	return groupVersionKindName{
+		APIVersion: ref.APIVersion,
+		Kind:       ref.Kind,
+		Name:       ref.Name,
+	}
+}
+
+type TopologyMutationHook interface {
+	DiscoverVariables(ctx context.Context, req *runtimehooksv1.DiscoverVariablesRequest, resp *runtimehooksv1.DiscoverVariablesResponse)
+	GeneratePatches(ctx context.Context, req *runtimehooksv1.GeneratePatchesRequest, resp *runtimehooksv1.GeneratePatchesResponse)
+	ValidateTopology(ctx context.Context, req *runtimehooksv1.ValidateTopologyRequest, resp *runtimehooksv1.ValidateTopologyResponse)
+}
+
+// injectRuntimeClient implements a runtimeclient.Client.
+// It allows us to plug a TopologyMutationHook into Cluster and ClusterClass controllers.
+type injectRuntimeClient struct {
+	runtimeExtension TopologyMutationHook
+}
+
+func (i injectRuntimeClient) CallExtension(ctx context.Context, hook runtimecatalog.Hook, _ metav1.Object, _ string, req runtimehooksv1.RequestObject, resp runtimehooksv1.ResponseObject) error {
+	// Note: We have to copy the requests. Otherwise we could get side effects from Runtime Extensions
+	// modifying the request instead of properly returning a response. Also, after Unmarshal only the
+	// Raw fields of runtime.RawExtension fields should be filled out and Object should be nil.
+	// This wouldn't be the case without the copy.
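+	// The switch below dispatches on the hook name: each case deep-copies the concrete request
+	// via copyObject, invokes the matching in-memory handler, and turns a failure response status
+	// into an error, mimicking a call to a registered Runtime Extension.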
+ switch runtimecatalog.HookName(hook) { + case runtimecatalog.HookName(runtimehooksv1.DiscoverVariables): + reqCopy, err := copyObject[runtimehooksv1.DiscoverVariablesRequest](req.(*runtimehooksv1.DiscoverVariablesRequest)) + if err != nil { + return err + } + i.runtimeExtension.DiscoverVariables(ctx, reqCopy, resp.(*runtimehooksv1.DiscoverVariablesResponse)) + if resp.GetStatus() == runtimehooksv1.ResponseStatusFailure { + return errors.Errorf("failed to call extension handler: got failure response: %v", resp.GetMessage()) + } + return nil + case runtimecatalog.HookName(runtimehooksv1.GeneratePatches): + reqCopy, err := copyObject[runtimehooksv1.GeneratePatchesRequest](req.(*runtimehooksv1.GeneratePatchesRequest)) + if err != nil { + return err + } + i.runtimeExtension.GeneratePatches(ctx, reqCopy, resp.(*runtimehooksv1.GeneratePatchesResponse)) + if resp.GetStatus() == runtimehooksv1.ResponseStatusFailure { + return errors.Errorf("failed to call extension handler: got failure response: %v", resp.GetMessage()) + } + return nil + case runtimecatalog.HookName(runtimehooksv1.ValidateTopology): + reqCopy, err := copyObject[runtimehooksv1.ValidateTopologyRequest](req.(*runtimehooksv1.ValidateTopologyRequest)) + if err != nil { + return err + } + i.runtimeExtension.ValidateTopology(ctx, reqCopy, resp.(*runtimehooksv1.ValidateTopologyResponse)) + if resp.GetStatus() == runtimehooksv1.ResponseStatusFailure { + return errors.Errorf("failed to call extension handler: got failure response: %v", resp.GetMessage()) + } + return nil + } + panic("implement me") +} + +// copyObject copies an object with json Marshal & Unmarshal. +func copyObject[T any](obj *T) (*T, error) { + objCopy := new(T) + + reqBytes, err := json.Marshal(obj) + if err != nil { + return nil, err + } + if err := json.Unmarshal(reqBytes, &objCopy); err != nil { + return nil, err + } + + return objCopy, nil +} + +func (i injectRuntimeClient) WarmUp(_ *runtimev1.ExtensionConfigList) error { + panic("implement me") +} + +func (i injectRuntimeClient) IsReady() bool { + panic("implement me") +} + +func (i injectRuntimeClient) Discover(_ context.Context, _ *runtimev1.ExtensionConfig) (*runtimev1.ExtensionConfig, error) { + panic("implement me") +} + +func (i injectRuntimeClient) Register(_ *runtimev1.ExtensionConfig) error { + panic("implement me") +} + +func (i injectRuntimeClient) Unregister(_ *runtimev1.ExtensionConfig) error { + panic("implement me") +} + +func (i injectRuntimeClient) CallAllExtensions(_ context.Context, _ runtimecatalog.Hook, _ metav1.Object, _ runtimehooksv1.RequestObject, _ runtimehooksv1.ResponseObject) error { + panic("implement me") +} diff --git a/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml new file mode 100644 index 000000000000..789d375a662a --- /dev/null +++ b/test/extension/handlers/topologymutation/testdata/clusterclass-quick-start-runtimesdk.yaml @@ -0,0 +1,148 @@ +apiVersion: cluster.x-k8s.io/v1beta1 +kind: ClusterClass +metadata: + name: quick-start-runtimesdk +spec: + controlPlane: + ref: + apiVersion: controlplane.cluster.x-k8s.io/v1beta1 + kind: KubeadmControlPlaneTemplate + name: quick-start-control-plane + machineInfrastructure: + ref: + kind: DockerMachineTemplate + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + name: quick-start-control-plane + namingStrategy: + template: "{{ .cluster.name }}-cp-{{ .random }}" + infrastructure: + ref: + apiVersion: 
infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerClusterTemplate + name: quick-start-cluster + workers: + machineDeployments: + - class: default-worker + namingStrategy: + template: "{{ .cluster.name }}-md-{{ .machineDeployment.topologyName }}-{{ .random }}" + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: quick-start-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachineTemplate + name: quick-start-default-worker-machinetemplate + machinePools: + - class: default-worker + namingStrategy: + template: "{{ .cluster.name }}-mp-{{ .machinePool.topologyName }}-{{ .random }}" + template: + bootstrap: + ref: + apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 + kind: KubeadmConfigTemplate + name: quick-start-default-worker-bootstraptemplate + infrastructure: + ref: + apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 + kind: DockerMachinePoolTemplate + name: quick-start-default-worker-machinepooltemplate + patches: + - name: test-patch + external: + generateExtension: generate-patches.k8s-upgrade-with-runtimesdk + validateExtension: validate-topology.k8s-upgrade-with-runtimesdk + discoverVariablesExtension: discover-variables.k8s-upgrade-with-runtimesdk +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerClusterTemplate +metadata: + name: quick-start-cluster +spec: + template: + spec: + failureDomains: + fd1: + controlPlane: true + fd2: + controlPlane: true + fd3: + controlPlane: true + fd4: + controlPlane: false + fd5: + controlPlane: false + fd6: + controlPlane: false + fd7: + controlPlane: false + fd8: + controlPlane: false + loadBalancer: {} +--- +kind: KubeadmControlPlaneTemplate +apiVersion: controlplane.cluster.x-k8s.io/v1beta1 +metadata: + name: quick-start-control-plane +spec: + template: + spec: + machineTemplate: + nodeDrainTimeout: 1s + kubeadmConfigSpec: + clusterConfiguration: + apiServer: + # host.docker.internal is required by kubetest when running on MacOS because of the way ports are proxied. + certSANs: [localhost, 127.0.0.1, 0.0.0.0, host.docker.internal] + initConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. + joinConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. 
+--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: quick-start-control-plane +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachineTemplate +metadata: + name: quick-start-default-worker-machinetemplate +spec: + template: + spec: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: infrastructure.cluster.x-k8s.io/v1beta1 +kind: DockerMachinePoolTemplate +metadata: + name: quick-start-default-worker-machinepooltemplate +spec: + template: + spec: + template: + extraMounts: + - containerPath: "/var/run/docker.sock" + hostPath: "/var/run/docker.sock" +--- +apiVersion: bootstrap.cluster.x-k8s.io/v1beta1 +kind: KubeadmConfigTemplate +metadata: + name: quick-start-default-worker-bootstraptemplate +spec: + template: + spec: + joinConfiguration: + nodeRegistration: {} # node registration parameters are automatically injected by CAPD according to the kindest/node image in use. diff --git a/test/go.mod b/test/go.mod index d546c947c5ea..e0b195a40899 100644 --- a/test/go.mod +++ b/test/go.mod @@ -38,6 +38,7 @@ require ( ) require ( + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect github.com/BurntSushi/toml v1.0.0 // indirect github.com/MakeNowJust/heredoc v1.0.0 // indirect github.com/Masterminds/goutils v1.1.1 // indirect @@ -53,6 +54,7 @@ require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/chai2010/gettext-go v1.0.2 // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-semver v0.3.1 // indirect @@ -63,8 +65,10 @@ require ( github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect github.com/evanphx/json-patch v5.7.0+incompatible // indirect + github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-errors/errors v1.4.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect @@ -75,6 +79,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect + github.com/google/btree v1.0.1 // indirect github.com/google/cel-go v0.17.7 // indirect github.com/google/gnostic-models v0.6.8 // indirect github.com/google/go-github/v53 v53.2.0 // indirect @@ -82,7 +87,10 @@ require ( github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect + github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.4.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect @@ -91,16 +99,20 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // 
indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect github.com/morikuni/aec v1.0.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect @@ -108,9 +120,11 @@ require ( github.com/opencontainers/image-spec v1.0.2 // indirect github.com/pelletier/go-toml v1.9.5 // indirect github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.45.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect + github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect github.com/shopspring/decimal v1.3.1 // indirect @@ -122,6 +136,7 @@ require ( github.com/stoewer/go-strcase v1.2.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/fastjson v1.6.4 // indirect + github.com/xlab/treeprint v1.2.0 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.12 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.0 // indirect @@ -133,6 +148,7 @@ require ( go.opentelemetry.io/otel/sdk v1.22.0 // indirect go.opentelemetry.io/otel/trace v1.22.0 // indirect go.opentelemetry.io/proto/otlp v1.0.0 // indirect + go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.26.0 // indirect golang.org/x/crypto v0.21.0 // indirect @@ -155,9 +171,13 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gotest.tools/v3 v3.4.0 // indirect + k8s.io/cli-runtime v0.29.3 // indirect k8s.io/cluster-bootstrap v0.29.3 // indirect k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect + k8s.io/kubectl v0.29.3 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect + sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect ) diff --git a/test/go.sum b/test/go.sum index 481eb752cb23..74aa33ad4e6b 100644 --- a/test/go.sum +++ b/test/go.sum @@ -1,3 +1,4 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.110.10 h1:LXy9GEO+timppncPIAZoOj3l58LIU9k+kn48AN7IO3Y= 
cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= @@ -5,6 +6,7 @@ cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGB cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU= github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= @@ -40,11 +42,15 @@ github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2y github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk= +github.com/chai2010/gettext-go v1.0.2/go.mod h1:y+wnP2cHYaVj19NZhYKAwEMH2CI1gNHeQQ+5AjwawxA= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= @@ -67,6 +73,8 @@ github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= @@ -85,6 +93,8 @@ github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkp github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.12.0 
h1:y2DdzBAURM29NFF94q6RaY4vjIH1rtwDapwQtU84iWk= github.com/emicklei/go-restful/v3 v3.12.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.0.2 h1:QkIBuU5k+x7/QXPvPPnWXWlCdaBFApVqftFV6k087DA= github.com/envoyproxy/protoc-gen-validate v1.0.2/go.mod h1:GpiZQP3dDbg4JouG/NNS7QWXpgx6x8QiMKdmN72jogE= github.com/evanphx/json-patch v5.7.0+incompatible h1:vgGkfT/9f8zE6tvSCe74nfpAVDQ2tG6yudJd8LBksgI= @@ -92,6 +102,8 @@ github.com/evanphx/json-patch v5.7.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= +github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flatcar/ignition v0.36.2 h1:xGHgScUe0P4Fkprjqv7L2CE58emiQgP833OCCn9z2v4= @@ -100,6 +112,8 @@ github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHk github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -125,10 +139,20 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod 
h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= @@ -140,6 +164,11 @@ github.com/google/cel-go v0.17.7/go.mod h1:HXZKzB0LXqer5lHHgfWAnlYwJaQBDKMjxjulN github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= @@ -156,6 +185,8 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 h1:SJ+NtwL6QaZ21U+IrK7d0gGgpjGGvd2kz+FzTHVzdqI= github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2/go.mod h1:Tv1PlzqC9t8wNnpPdctvtSUOPUUg4SHeE6vR1Ir2hmg= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= +github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -163,6 +194,8 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= 
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= @@ -200,6 +233,8 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= @@ -212,6 +247,8 @@ github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQth github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -226,6 +263,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= +github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -247,6 +286,8 @@ github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3v github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pin/tftp v2.1.0+incompatible/go.mod h1:xVpZOMCXTy+A5QMjEVN0Glwa1sUvaJhFXbr/aAxuxGY= github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -256,6 +297,7 @@ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRI github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.18.0 h1:HzFfmkOzH5Q8L8G+kSJKUx5dtG87sewO+FoDDqP5Tbk= github.com/prometheus/client_golang v1.18.0/go.mod h1:T+GXkCk5wSJyOqMIzVgvvjFDlkOQntgjkJWKrN5txjA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.45.0 h1:2BGz0eBc2hdMDLnO/8n0jeB3oPrt2D08CekT0lneoxM= @@ -264,11 +306,14 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -299,6 +344,7 @@ github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ai github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -322,6 +368,8 @@ github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9o github.com/vmware/vmw-ovflib v0.0.0-20170608004843-1f217b9dc714/go.mod h1:jiPk45kn7klhByRvUq5i2vo1RtHKBHj+iWGFpxbXuuI= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xlab/treeprint v1.2.0 
h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= +github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -361,6 +409,8 @@ go.opentelemetry.io/otel/trace v1.22.0 h1:Hg6pPujv0XG9QaVbGOBVHunyuLcCC3jN7WEhPx go.opentelemetry.io/otel/trace v1.22.0/go.mod h1:RbbHXVqKES9QhzZq/fE5UnOSILqRt40a21sPw2He1xo= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca h1:VdD38733bfYv5tUZwEIskMM93VanwNIi5bIKnDrJdEY= +go.starlark.net v0.0.0-20230525235612-a134d8f9ddca/go.mod h1:jxU+3+j+71eXOW14274+SmmuW82qJzl6iZSeqEtTGds= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -375,11 +425,18 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190320064053-1272bf9dcd53/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -391,14 +448,18 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod 
h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -408,10 +469,12 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -419,6 +482,7 @@ golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= @@ -432,7 +496,11 @@ golang.org/x/text 
v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -446,16 +514,32 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:J7XzRzVy1+IPwWHZUzoD0IccYZIrXILAQpc+Qy9CMhY= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17 h1:JpwMPBpFN3uKhdaekDpiNlImDdkUAyiJ6ez/uxGaUSo= google.golang.org/genproto/googleapis/api v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:0xJLfVdJqpAPl8tDg1ujOCGzx6LFLttXT5NhllGOXY4= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f h1:ultW7fxlIvee4HYrtnaRPon9HpEgFk5zYpmfMgtKB5I= google.golang.org/genproto/googleapis/rpc v0.0.0-20231120223509-83a465c0220f/go.mod h1:L9KNLi232K1/xB6f7AlSX692koaRnKaWSR0stBki0Yc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.60.1 h1:26+wFr+cNqSGFcOXcabYC0lUVJVRa2Sb2ortSK7VrEU= google.golang.org/grpc v1.60.1/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod 
h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= @@ -481,6 +565,8 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o= gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/api v0.29.3 h1:2ORfZ7+bGC3YJqGpV0KSDDEVf8hdGQ6A03/50vj8pmw= k8s.io/api v0.29.3/go.mod h1:y2yg2NTyHUUkIoTC+phinTnEa3KFM6RZ3szxt014a80= k8s.io/apiextensions-apiserver v0.29.3 h1:9HF+EtZaVpFjStakF4yVufnXGPRppWFEQ87qnO91YeI= @@ -489,6 +575,8 @@ k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU= k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU= k8s.io/apiserver v0.29.3 h1:xR7ELlJ/BZSr2n4CnD3lfA4gzFivh0wwfNfz9L0WZcE= k8s.io/apiserver v0.29.3/go.mod h1:hrvXlwfRulbMbBgmWRQlFru2b/JySDpmzvQwwk4GUOs= +k8s.io/cli-runtime v0.29.3 h1:r68rephmmytoywkw2MyJ+CxjpasJDQY7AGc3XY2iv1k= +k8s.io/cli-runtime v0.29.3/go.mod h1:aqVUsk86/RhaGJwDhHXH0jcdqBrgdF3bZWk4Z9D4mkM= k8s.io/client-go v0.29.3 h1:R/zaZbEAxqComZ9FHeQwOh3Y1ZUs7FaHKZdQtIc2WZg= k8s.io/client-go v0.29.3/go.mod h1:tkDisCvgPfiRpxGnOORfkljmS+UrW+WtXAy2fTvXJB0= k8s.io/cluster-bootstrap v0.29.3 h1:DIMDZSN8gbFMy9CS2mAS2Iqq/fIUG783WN/1lqi5TF8= @@ -499,6 +587,8 @@ k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/AuzbMm96cd3YHRTU83I780= k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/kubectl v0.29.3 h1:RuwyyIU42MAISRIePaa8Q7A3U74Q9P4MoJbDFz9o3us= +k8s.io/kubectl v0.29.3/go.mod h1:yCxfY1dbwgVdEt2zkJ6d5NNLOhhWgTyrqACIoFhpdd4= k8s.io/utils v0.0.0-20231127182322-b307cd553661 h1:FepOBzJ0GXm8t0su67ln2wAZjbQ6RxQGZDnzuLcrUTI= k8s.io/utils v0.0.0-20231127182322-b307cd553661/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= @@ -509,6 +599,10 @@ sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMm sigs.k8s.io/json 
v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.22.0 h1:z/+yr/azoOfzsfooqRsPw1wjJlqT/ukXP0ShkHwNlsI= sigs.k8s.io/kind v0.22.0/go.mod h1:aBlbxg08cauDgZ612shr017/rZwqd7AS563FvpWKPVs= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= +sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= +sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= From 60af25fabf0eaa386af80079fec308d08ac1616e Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Thu, 28 Mar 2024 12:50:06 +0100 Subject: [PATCH 07/10] Address review comments --- exp/runtime/topologymutation/walker_test.go | 3 +- .../desiredstate}/desired_state.go | 80 ++++++------------- .../desiredstate}/desired_state_test.go | 50 ++++++------ .../desiredstate}/scope/blueprint.go | 0 .../desiredstate}/scope/blueprint_test.go | 0 .../desiredstate}/scope/doc.go | 0 .../scope/hookresponsetracker.go | 0 .../scope/hookresponsetracker_test.go | 0 .../desiredstate}/scope/scope.go | 0 .../desiredstate}/scope/scope_test.go | 0 .../desiredstate}/scope/state.go | 0 .../desiredstate}/scope/state_test.go | 0 .../desiredstate}/scope/upgradetracker.go | 28 +++++++ .../scope/upgradetracker_test.go | 0 .../controllers/topology/cluster/blueprint.go | 2 +- .../topology/cluster/blueprint_test.go | 2 +- .../topology/cluster/cluster_controller.go | 14 ++-- .../cluster/cluster_controller_test.go | 2 +- .../topology/cluster/conditions.go | 2 +- .../topology/cluster/conditions_test.go | 2 +- .../topology/cluster/current_state.go | 2 +- .../topology/cluster/current_state_test.go | 2 +- .../topology/cluster/patches/engine.go | 2 +- .../topology/cluster/patches/engine_test.go | 2 +- .../topology/cluster/reconcile_state.go | 4 +- .../topology/cluster/reconcile_state_test.go | 12 +-- .../handler_integration_test.go | 10 +-- 27 files changed, 109 insertions(+), 110 deletions(-) rename exp/{util/topology => topology/desiredstate}/desired_state.go (94%) rename exp/{util/topology => topology/desiredstate}/desired_state_test.go (99%) rename exp/{util/topology => topology/desiredstate}/scope/blueprint.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/blueprint_test.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/doc.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/hookresponsetracker.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/hookresponsetracker_test.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/scope.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/scope_test.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/state.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/state_test.go (100%) rename exp/{util/topology => topology/desiredstate}/scope/upgradetracker.go (92%) rename exp/{util/topology => topology/desiredstate}/scope/upgradetracker_test.go (100%) diff --git 
a/exp/runtime/topologymutation/walker_test.go b/exp/runtime/topologymutation/walker_test.go index 58370a30492e..570232669dee 100644 --- a/exp/runtime/topologymutation/walker_test.go +++ b/exp/runtime/topologymutation/walker_test.go @@ -20,6 +20,7 @@ import ( "bytes" "context" "encoding/json" + "fmt" "testing" . "github.com/onsi/gomega" @@ -95,7 +96,7 @@ func Test_WalkTemplates(t *testing.T) { expectedResponse: &runtimehooksv1.GeneratePatchesResponse{ CommonResponse: runtimehooksv1.CommonResponse{ Status: runtimehooksv1.ResponseStatusFailure, - Message: "failed to merge builtin variables: failed to unmarshal builtin variable: json: cannot unmarshal string into Go value of type v1alpha1.Builtins", + Message: fmt.Sprintf("failed to merge builtin variables: failed to unmarshal builtin variable: json: cannot unmarshal string into Go value of type %s.Builtins", runtimehooksv1.GroupVersion.Version), }, }, }, diff --git a/exp/util/topology/desired_state.go b/exp/topology/desiredstate/desired_state.go similarity index 94% rename from exp/util/topology/desired_state.go rename to exp/topology/desiredstate/desired_state.go index 5f57fd6da154..dedd611b4552 100644 --- a/exp/util/topology/desired_state.go +++ b/exp/topology/desiredstate/desired_state.go @@ -14,8 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package topology contains cluster topology utils, e.g. to compute the desired state. -package topology +// Package desiredstate contains cluster topology utils, e.g. to compute the desired state. +package desiredstate import ( "context" @@ -35,7 +35,7 @@ import ( expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches" @@ -50,16 +50,14 @@ import ( "sigs.k8s.io/cluster-api/util" ) -// DesiredStateEngine is an desiredStateEngine to compute the desired state. -type DesiredStateEngine interface { - ComputeDesiredState(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) - - IsControlPlaneStable(s *scope.Scope) bool +// Generator is a generator to generate the desired state. +type Generator interface { + Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) } -// NewDesiredStateEngine creates a new desired state desiredStateEngine. -func NewDesiredStateEngine(client client.Client, tracker *remote.ClusterCacheTracker, runtimeClient runtimeclient.Client) DesiredStateEngine { - return &desiredStateEngine{ +// NewGenerator creates a new generator to generate desired state. +func NewGenerator(client client.Client, tracker *remote.ClusterCacheTracker, runtimeClient runtimeclient.Client) Generator { + return &generator{ Client: client, Tracker: tracker, RuntimeClient: runtimeClient, @@ -67,9 +65,9 @@ func NewDesiredStateEngine(client client.Client, tracker *remote.ClusterCacheTra } } -// desiredStateEngine is an desiredStateEngine to compute the desired state. +// generator is a generator to generate desired state. // It is used in the cluster topology controller, but it can also be used for testing. 
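+// Note: external callers are expected to obtain an instance via NewGenerator, as the type
+// itself is unexported.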
-type desiredStateEngine struct {
+type generator struct {
 	Client client.Client
 
 	Tracker *remote.ClusterCacheTracker
@@ -80,11 +78,11 @@ type desiredStateEngine struct {
 	patchEngine patches.Engine
 }
 
-// ComputeDesiredState computes the desired state of the cluster topology.
+// Generate computes the desired state of the cluster topology.
 // NOTE: We are assuming all the required objects are provided as input; also, in case of any error,
 // the entire compute operation will fail. This might be improved in the future if support for reconciling
 // subset of a topology will be implemented.
-func (e *desiredStateEngine) ComputeDesiredState(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
+func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
 	var err error
 	desiredState := &scope.ClusterState{
 		ControlPlane: &scope.ControlPlaneState{},
@@ -251,7 +249,7 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop
 
 // computeControlPlane computes the desired state for the ControlPlane object starting from the
 // corresponding template defined in the blueprint.
-func (e *desiredStateEngine) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 	template := s.Blueprint.ControlPlane.Template
 	templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
 	cluster := s.Current.Cluster
@@ -400,7 +398,7 @@ func (e *desiredStateEngine) computeControlPlane(ctx context.Context, s *scope.S
 // computeControlPlaneVersion calculates the version of the desired control plane.
 // The version is calculated using the state of the current machine deployments, the current control plane
 // and the version defined in the topology.
-func (e *desiredStateEngine) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
+func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
 	log := tlog.LoggerFrom(ctx)
 	desiredVersion := s.Blueprint.Topology.Version
 	// If we are creating the control plane object (current control plane is nil), use version from topology.
@@ -607,7 +605,7 @@ func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredRe
 }
 
 // computeMachineDeployments computes the desired state of the list of MachineDeployments.
-func (e *desiredStateEngine) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
+func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
 	machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
 	for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
 		desiredMachineDeployment, err := e.computeMachineDeployment(ctx, s, mdTopology)
@@ -622,7 +620,7 @@ func (e *desiredStateEngine) computeMachineDeployments(ctx context.Context, s *s
 // computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
 // The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
 // the machineDeployment class.
-func (e *desiredStateEngine) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
+func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
 	desiredMachineDeployment := &scope.MachineDeploymentState{}
 
 	// Gets the blueprint for the MachineDeployment class.
@@ -842,14 +840,14 @@ func (e *desiredStateEngine) computeMachineDeployment(ctx context.Context, s *sc
 // computeMachineDeploymentVersion calculates the version of the desired machine deployment.
 // The version is calculated using the state of the current machine deployments,
 // the current control plane and the version defined in the topology.
-func (e *desiredStateEngine) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
+func (e *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
 	desiredVersion := s.Blueprint.Topology.Version
 	// If creating a new machine deployment, mark it as pending if the control plane is not
 	// yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
 	// Example: join could fail if the load balancers are slow in detecting when CP machines are
 	// being deleted.
 	if currentMDState == nil || currentMDState.Object == nil {
-		if !e.IsControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
+		if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
 			s.UpgradeTracker.MachineDeployments.MarkPendingCreate(machineDeploymentTopology.Name)
 		}
 		return desiredVersion
@@ -886,7 +884,7 @@ func (e *desiredStateEngine) computeMachineDeploymentVersion(s *scope.Scope, mac
 	// Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet.
 	// Return the current version of the machine deployment. We will pick up the new version after the control
 	// plane is stable.
-	if !e.IsControlPlaneStable(s) {
+	if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() {
 		s.UpgradeTracker.MachineDeployments.MarkPendingUpgrade(currentMDState.Object.Name)
 		return currentVersion
 	}
@@ -897,34 +895,6 @@ func (e *desiredStateEngine) computeMachineDeploymentVersion(s *scope.Scope, mac
 	return desiredVersion
 }
 
-// IsControlPlaneStable returns true is the ControlPlane is stable.
-func (e *desiredStateEngine) IsControlPlaneStable(s *scope.Scope) bool {
-	// If the current control plane is upgrading it is not considered stable.
-	if s.UpgradeTracker.ControlPlane.IsUpgrading {
-		return false
-	}
-
-	// If control plane supports replicas, check if the control plane is in the middle of a scale operation.
-	// If the current control plane is scaling then it is not considered stable.
-	if s.UpgradeTracker.ControlPlane.IsScaling {
-		return false
-	}
-
-	// Check if we are about to upgrade the control plane. Since the control plane is about to start its upgrade process
-	// it cannot be considered stable.
-	if s.UpgradeTracker.ControlPlane.IsStartingUpgrade {
-		return false
-	}
-
-	// If the ControlPlane is pending picking up an upgrade then it is not yet at the desired state and
-	// cannot be considered stable.
-	if s.UpgradeTracker.ControlPlane.IsPendingUpgrade {
-		return false
-	}
-
-	return true
-}
-
 // isMachineDeploymentDeferred returns true if the upgrade for the mdTopology is deferred.
 // This is the case when either:
 //   - the mdTopology has the ClusterTopologyDeferUpgradeAnnotation annotation.
@@ -961,7 +931,7 @@ func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology
 }
 
 // computeMachinePools computes the desired state of the list of MachinePools.
-func (e *desiredStateEngine) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
+func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
 	machinePoolsStateMap := make(scope.MachinePoolsStateMap)
 	for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools {
 		desiredMachinePool, err := e.computeMachinePool(ctx, s, mpTopology)
@@ -976,7 +946,7 @@ func (e *desiredStateEngine) computeMachinePools(ctx context.Context, s *scope.S
 // computeMachinePool computes the desired state for a MachinePoolTopology.
 // The generated machinePool object is calculated using the values from the machinePoolTopology and
 // the machinePool class.
-func (e *desiredStateEngine) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
+func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
 	desiredMachinePool := &scope.MachinePoolState{}
 
 	// Gets the blueprint for the MachinePool class.
@@ -1172,14 +1142,14 @@ func (e *desiredStateEngine) computeMachinePool(_ context.Context, s *scope.Scop
 // computeMachinePoolVersion calculates the version of the desired machine pool.
 // The version is calculated using the state of the current machine pools,
 // the current control plane and the version defined in the topology.
-func (e *desiredStateEngine) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
+func (e *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
 	desiredVersion := s.Blueprint.Topology.Version
 	// If creating a new machine pool, mark it as pending if the control plane is not
 	// yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
 	// Example: join could fail if the load balancers are slow in detecting when CP machines are
 	// being deleted.
 	if currentMPState == nil || currentMPState.Object == nil {
-		if !e.IsControlPlaneStable(s) || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
+		if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() || s.HookResponseTracker.IsBlocking(runtimehooksv1.AfterControlPlaneUpgrade) {
 			s.UpgradeTracker.MachinePools.MarkPendingCreate(machinePoolTopology.Name)
 		}
 		return desiredVersion
@@ -1216,7 +1186,7 @@ func (e *desiredStateEngine) computeMachinePoolVersion(s *scope.Scope, machinePo
 	// Return early if the Control Plane is not stable. Do not pick up the desiredVersion yet.
 	// Return the current version of the machine pool. We will pick up the new version after the control
 	// plane is stable.
-	if !e.IsControlPlaneStable(s) {
+	if !s.UpgradeTracker.ControlPlane.IsControlPlaneStable() {
 		s.UpgradeTracker.MachinePools.MarkPendingUpgrade(currentMPState.Object.Name)
 		return currentVersion
 	}
diff --git a/exp/util/topology/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go
similarity index 99%
rename from exp/util/topology/desired_state_test.go
rename to exp/topology/desiredstate/desired_state_test.go
index 89b140e11af5..504df993c62f 100644
--- a/exp/util/topology/desired_state_test.go
+++ b/exp/topology/desiredstate/desired_state_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package topology
+package desiredstate
 
 import (
 	"strings"
@@ -40,7 +40,7 @@ import (
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
-	"sigs.k8s.io/cluster-api/exp/util/topology/scope"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/hooks"
@@ -338,7 +338,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(cluster)
 		scope.Blueprint = blueprint
 
-		r := &desiredStateEngine{}
+		r := &generator{}
 
 		obj, err := r.computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -399,7 +399,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(cluster)
 		scope.Blueprint = blueprint
 
-		r := &desiredStateEngine{}
+		r := &generator{}
 
 		obj, err := r.computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -429,7 +429,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(clusterWithoutReplicas)
 		scope.Blueprint = blueprint
 
-		r := &desiredStateEngine{}
+		r := &generator{}
 
 		obj, err := r.computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -474,7 +474,7 @@ func TestComputeControlPlane(t *testing.T) {
 		s.Blueprint = blueprint
 		s.Current.ControlPlane = &scope.ControlPlaneState{}
 
-		r := &desiredStateEngine{}
+		r := &generator{}
 
 		obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -535,7 +535,7 @@ func TestComputeControlPlane(t *testing.T) {
 		scope := scope.New(clusterWithControlPlaneRef)
 		scope.Blueprint = blueprint
 
-		r := &desiredStateEngine{}
+		r := &generator{}
 
 		obj, err := r.computeControlPlane(ctx, scope, nil)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -605,7 +605,7 @@ func TestComputeControlPlane(t *testing.T) {
 				Object: tt.currentControlPlane,
 			}
 
-			r := &desiredStateEngine{}
+			r := &generator{}
 
 			obj, err := r.computeControlPlane(ctx, s, nil)
 			g.Expect(err).ToNot(HaveOccurred())
@@ -645,7 +645,7 @@ func TestComputeControlPlane(t *testing.T) {
 		s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
 		s.Blueprint = blueprint
 
-		r := &desiredStateEngine{}
+		r := &generator{}
 
 		obj, err := r.computeControlPlane(ctx, s, nil)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -883,7 +883,7 @@ func TestComputeControlPlaneVersion(t *testing.T) {
 
 			fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build()
 
-			r := &desiredStateEngine{
+			r := &generator{
 				Client:        fakeClient,
 				RuntimeClient: runtimeClient,
 			}
@@ -1186,7 +1186,7 @@ func TestComputeControlPlaneVersion(t *testing.T) {
 
 			fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(tt.s.Current.Cluster).Build()
 
-			r := &desiredStateEngine{
+			r := &generator{
 				Client:        fakeClient,
 				RuntimeClient: fakeRuntimeClient,
 			}
@@ -1262,7 +1262,7 @@ func TestComputeControlPlaneVersion(t *testing.T) {
 
 		fakeClient := fake.NewClientBuilder().WithScheme(fakeScheme).WithObjects(s.Current.Cluster).Build()
 
-		r := &desiredStateEngine{
+		r := &generator{
 			Client:        fakeClient,
 			RuntimeClient: runtimeClient,
 		}
@@ -1435,7 +1435,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 		scope := scope.New(cluster)
 		scope.Blueprint = blueprint
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -1506,7 +1506,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 		// missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy
 	}
 
-	e := desiredStateEngine{}
+	e := generator{}
 
 	actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
 	g.Expect(err).ToNot(HaveOccurred())
@@ -1552,7 +1552,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 			},
 		}
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		actual, err := e.computeMachineDeployment(ctx, s, mdTopology)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -1602,7 +1602,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 			Name: "big-pool-of-machines",
 		}
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		_, err := e.computeMachineDeployment(ctx, scope, mdTopology)
 		g.Expect(err).To(HaveOccurred())
@@ -1717,7 +1717,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 			}
 			s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
 
-			e := desiredStateEngine{}
+			e := generator{}
 
 			obj, err := e.computeMachineDeployment(ctx, s, mdTopology)
 			g.Expect(err).ToNot(HaveOccurred())
@@ -1735,7 +1735,7 @@ func TestComputeMachineDeployment(t *testing.T) {
 			Name: "big-pool-of-machines",
 		}
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		actual, err := e.computeMachineDeployment(ctx, scope, mdTopology)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -1846,7 +1846,7 @@ func TestComputeMachinePool(t *testing.T) {
 		scope := scope.New(cluster)
 		scope.Blueprint = blueprint
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		actual, err := e.computeMachinePool(ctx, scope, mpTopology)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -1911,7 +1911,7 @@ func TestComputeMachinePool(t *testing.T) {
 		// missing FailureDomain, NodeDrainTimeout, NodeVolumeDetachTimeout, NodeDeletionTimeout, MinReadySeconds, Strategy
 	}
 
-	e := desiredStateEngine{}
+	e := generator{}
 
 	actual, err := e.computeMachinePool(ctx, scope, mpTopology)
 	g.Expect(err).ToNot(HaveOccurred())
@@ -1956,7 +1956,7 @@ func TestComputeMachinePool(t *testing.T) {
 			},
 		}
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		actual, err := e.computeMachinePool(ctx, s, mpTopology)
 		g.Expect(err).ToNot(HaveOccurred())
@@ -2001,7 +2001,7 @@ func TestComputeMachinePool(t *testing.T) {
 			Name: "big-pool-of-machines",
 		}
 
-		e := desiredStateEngine{}
+		e := generator{}
 
 		_, err := e.computeMachinePool(ctx, scope, mpTopology)
 		g.Expect(err).To(HaveOccurred())
@@ -2114,7 +2114,7 @@ func TestComputeMachinePool(t *testing.T) {
 			}
 			s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
-			e := desiredStateEngine{}
+			e := generator{}
 
 			obj, err := e.computeMachinePool(ctx, s, mpTopology)
 			g.Expect(err).ToNot(HaveOccurred())
@@ -2281,7 +2281,7 @@ func TestComputeMachineDeploymentVersion(t *testing.T) {
 			s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
 			s.UpgradeTracker.MachineDeployments.MarkUpgrading(tt.upgradingMachineDeployments...)
 
-			e := desiredStateEngine{}
+			e := generator{}
 
 			version := e.computeMachineDeploymentVersion(s, tt.machineDeploymentTopology, tt.currentMachineDeploymentState)
 			g.Expect(version).To(Equal(tt.expectedVersion))
@@ -2462,7 +2462,7 @@ func TestComputeMachinePoolVersion(t *testing.T) {
 			s.UpgradeTracker.ControlPlane.IsProvisioning = tt.controlPlaneProvisioning
 			s.UpgradeTracker.MachinePools.MarkUpgrading(tt.upgradingMachinePools...)
 
-			e := desiredStateEngine{}
+			e := generator{}
 
 			version := e.computeMachinePoolVersion(s, tt.machinePoolTopology, tt.currentMachinePoolState)
 			g.Expect(version).To(Equal(tt.expectedVersion))
diff --git a/exp/util/topology/scope/blueprint.go b/exp/topology/desiredstate/scope/blueprint.go
similarity index 100%
rename from exp/util/topology/scope/blueprint.go
rename to exp/topology/desiredstate/scope/blueprint.go
diff --git a/exp/util/topology/scope/blueprint_test.go b/exp/topology/desiredstate/scope/blueprint_test.go
similarity index 100%
rename from exp/util/topology/scope/blueprint_test.go
rename to exp/topology/desiredstate/scope/blueprint_test.go
diff --git a/exp/util/topology/scope/doc.go b/exp/topology/desiredstate/scope/doc.go
similarity index 100%
rename from exp/util/topology/scope/doc.go
rename to exp/topology/desiredstate/scope/doc.go
diff --git a/exp/util/topology/scope/hookresponsetracker.go b/exp/topology/desiredstate/scope/hookresponsetracker.go
similarity index 100%
rename from exp/util/topology/scope/hookresponsetracker.go
rename to exp/topology/desiredstate/scope/hookresponsetracker.go
diff --git a/exp/util/topology/scope/hookresponsetracker_test.go b/exp/topology/desiredstate/scope/hookresponsetracker_test.go
similarity index 100%
rename from exp/util/topology/scope/hookresponsetracker_test.go
rename to exp/topology/desiredstate/scope/hookresponsetracker_test.go
diff --git a/exp/util/topology/scope/scope.go b/exp/topology/desiredstate/scope/scope.go
similarity index 100%
rename from exp/util/topology/scope/scope.go
rename to exp/topology/desiredstate/scope/scope.go
diff --git a/exp/util/topology/scope/scope_test.go b/exp/topology/desiredstate/scope/scope_test.go
similarity index 100%
rename from exp/util/topology/scope/scope_test.go
rename to exp/topology/desiredstate/scope/scope_test.go
diff --git a/exp/util/topology/scope/state.go b/exp/topology/desiredstate/scope/state.go
similarity index 100%
rename from exp/util/topology/scope/state.go
rename to exp/topology/desiredstate/scope/state.go
diff --git a/exp/util/topology/scope/state_test.go b/exp/topology/desiredstate/scope/state_test.go
similarity index 100%
rename from exp/util/topology/scope/state_test.go
rename to exp/topology/desiredstate/scope/state_test.go
diff --git a/exp/util/topology/scope/upgradetracker.go b/exp/topology/desiredstate/scope/upgradetracker.go
similarity index 92%
rename from exp/util/topology/scope/upgradetracker.go
rename to exp/topology/desiredstate/scope/upgradetracker.go
index 5249e7ba2a29..d4eec94e64f2 100644
--- a/exp/util/topology/scope/upgradetracker.go
+++ b/exp/topology/desiredstate/scope/upgradetracker.go
@@ -162,6 +162,34 @@ func NewUpgradeTracker(opts ...UpgradeTrackerOption) *UpgradeTracker {
 	}
 }
 
+// IsControlPlaneStable returns true if the ControlPlane is stable.
+func (t *ControlPlaneUpgradeTracker) IsControlPlaneStable() bool {
+	// If the current control plane is upgrading it is not considered stable.
+	if t.IsUpgrading {
+		return false
+	}
+
+	// If control plane supports replicas, check if the control plane is in the middle of a scale operation.
+	// If the current control plane is scaling then it is not considered stable.
+	if t.IsScaling {
+		return false
+	}
+
+	// Check if we are about to upgrade the control plane. Since the control plane is about to start its upgrade process
+	// it cannot be considered stable.
+	if t.IsStartingUpgrade {
+		return false
+	}
+
+	// If the ControlPlane is pending picking up an upgrade then it is not yet at the desired state and
+	// cannot be considered stable.
+	if t.IsPendingUpgrade {
+		return false
+	}
+
+	return true
+}
+
 // MarkUpgrading marks a MachineDeployment/MachinePool as currently upgrading or about to upgrade.
 func (m *WorkerUpgradeTracker) MarkUpgrading(names ...string) {
 	for _, name := range names {
diff --git a/exp/util/topology/scope/upgradetracker_test.go b/exp/topology/desiredstate/scope/upgradetracker_test.go
similarity index 100%
rename from exp/util/topology/scope/upgradetracker_test.go
rename to exp/topology/desiredstate/scope/upgradetracker_test.go
diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go
index a30b48fb2881..fb79c63e0de7 100644
--- a/internal/controllers/topology/cluster/blueprint.go
+++ b/internal/controllers/topology/cluster/blueprint.go
@@ -22,7 +22,7 @@ import (
 	"github.com/pkg/errors"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/exp/util/topology/scope"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
 	tlog "sigs.k8s.io/cluster-api/internal/log"
 )
 
diff --git a/internal/controllers/topology/cluster/blueprint_test.go b/internal/controllers/topology/cluster/blueprint_test.go
index 73bff3407605..4fe59a4e04cf 100644
--- a/internal/controllers/topology/cluster/blueprint_test.go
+++ b/internal/controllers/topology/cluster/blueprint_test.go
@@ -28,7 +28,7 @@ import (
 	. "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/exp/util/topology/scope"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
 	"sigs.k8s.io/cluster-api/internal/test/builder"
 )
 
diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go
index 55f4c1103436..f6720d68f6b6 100644
--- a/internal/controllers/topology/cluster/cluster_controller.go
+++ b/internal/controllers/topology/cluster/cluster_controller.go
@@ -40,8 +40,8 @@ import (
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
-	"sigs.k8s.io/cluster-api/exp/util/topology"
-	"sigs.k8s.io/cluster-api/exp/util/topology/scope"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 	"sigs.k8s.io/cluster-api/internal/hooks"
@@ -84,8 +84,8 @@ type Reconciler struct {
 	externalTracker external.ObjectTracker
 	recorder        record.EventRecorder
 
-	// desiredStateEngine is used to apply patches during computeDesiredState.
-	desiredStateEngine topology.DesiredStateEngine
+	// desiredStateGenerator is used to generate the desired state.
+	desiredStateGenerator desiredstate.Generator
 
 	patchHelperFactory structuredmerge.PatchHelperFactoryFunc
 }
@@ -125,7 +125,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 		Controller: c,
 		Cache:      mgr.GetCache(),
 	}
-	r.desiredStateEngine = topology.NewDesiredStateEngine(r.Client, r.Tracker, r.RuntimeClient)
+	r.desiredStateGenerator = desiredstate.NewGenerator(r.Client, r.Tracker, r.RuntimeClient)
 	r.recorder = mgr.GetEventRecorderFor("topology/cluster-controller")
 	if r.patchHelperFactory == nil {
 		r.patchHelperFactory = serverSideApplyPatchHelperFactory(r.Client, ssa.NewCache())
@@ -135,7 +135,7 @@ func (r *Reconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, opt
 
 // SetupForDryRun prepares the Reconciler for a dry run execution.
 func (r *Reconciler) SetupForDryRun(recorder record.EventRecorder) {
-	r.desiredStateEngine = topology.NewDesiredStateEngine(r.Client, r.Tracker, r.RuntimeClient)
+	r.desiredStateGenerator = desiredstate.NewGenerator(r.Client, r.Tracker, r.RuntimeClient)
 	r.recorder = recorder
 	r.patchHelperFactory = dryRunPatchHelperFactory(r.Client)
 }
@@ -272,7 +272,7 @@ func (r *Reconciler) reconcile(ctx context.Context, s *scope.Scope) (ctrl.Result
 	}
 
 	// Computes the desired state of the Cluster and store it in the request scope.
-	s.Desired, err = r.desiredStateEngine.ComputeDesiredState(ctx, s)
+	s.Desired, err = r.desiredStateGenerator.Generate(ctx, s)
 	if err != nil {
 		return ctrl.Result{}, errors.Wrap(err, "error computing the desired state of the Cluster topology")
 	}
diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go
index 2239d5bea2fc..653f2455b57b 100644
--- a/internal/controllers/topology/cluster/cluster_controller_test.go
+++ b/internal/controllers/topology/cluster/cluster_controller_test.go
@@ -40,7 +40,7 @@ import (
 	runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 	runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
-	"sigs.k8s.io/cluster-api/exp/util/topology/scope"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
 	"sigs.k8s.io/cluster-api/feature"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/internal/hooks"
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go
index 67d50d557f33..2d53d0da1343 100644
--- a/internal/controllers/topology/cluster/conditions.go
+++ b/internal/controllers/topology/cluster/conditions.go
@@ -23,7 +23,7 @@ import (
 	"github.com/pkg/errors"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
-	"sigs.k8s.io/cluster-api/exp/util/topology/scope"
+	"sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
 	"sigs.k8s.io/cluster-api/internal/contract"
 	"sigs.k8s.io/cluster-api/util/conditions"
 )
diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go
index 2c211a511e2a..7673522ac652 100644
--- a/internal/controllers/topology/cluster/conditions_test.go
+++ b/internal/controllers/topology/cluster/conditions_test.go
@@ -30,7 +30,7 @@ import (
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
"sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/util/conditions" ) diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go index 6f7d1d0ab785..646738e8e534 100644 --- a/internal/controllers/topology/cluster/current_state.go +++ b/internal/controllers/topology/cluster/current_state.go @@ -29,7 +29,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/internal/contract" tlog "sigs.k8s.io/cluster-api/internal/log" "sigs.k8s.io/cluster-api/util/labels" diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go index 68e6cbcc478a..76f965194ec0 100644 --- a/internal/controllers/topology/cluster/current_state_test.go +++ b/internal/controllers/topology/cluster/current_state_test.go @@ -30,7 +30,7 @@ import ( . "sigs.k8s.io/controller-runtime/pkg/envtest/komega" clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/internal/test/builder" "sigs.k8s.io/cluster-api/internal/topology/selectors" ) diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go index 08c513f17882..d5ffb9dc9cde 100644 --- a/internal/controllers/topology/cluster/patches/engine.go +++ b/internal/controllers/topology/cluster/patches/engine.go @@ -31,7 +31,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/api" diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go index 823c90f95ed9..64ed95d47f47 100644 --- a/internal/controllers/topology/cluster/patches/engine_test.go +++ b/internal/controllers/topology/cluster/patches/engine_test.go @@ -35,7 +35,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/feature" fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake" "sigs.k8s.io/cluster-api/internal/test/builder" diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go index d3f161208176..43c6326a69f5 100644 --- a/internal/controllers/topology/cluster/reconcile_state.go +++ b/internal/controllers/topology/cluster/reconcile_state.go @@ -38,7 +38,7 @@ import ( clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - 
"sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" @@ -238,7 +238,7 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope // - MachineDeployments/MachinePools are not currently upgrading // - MachineDeployments/MachinePools are not pending an upgrade // - MachineDeployments/MachinePools are not pending create - if r.desiredStateEngine.IsControlPlaneStable(s) && // Control Plane stable checks + if s.UpgradeTracker.ControlPlane.IsControlPlaneStable() && // Control Plane stable checks len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade !s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create !s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() && // No MachineDeployments are pending an upgrade diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go index edb8137d7e38..0d98edd40e77 100644 --- a/internal/controllers/topology/cluster/reconcile_state_test.go +++ b/internal/controllers/topology/cluster/reconcile_state_test.go @@ -44,8 +44,8 @@ import ( runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/exp/util/topology" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/internal/contract" "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge" "sigs.k8s.io/cluster-api/internal/hooks" @@ -1080,10 +1080,10 @@ func TestReconcile_callAfterClusterUpgrade(t *testing.T) { fakeClient := fake.NewClientBuilder().WithObjects(tt.s.Current.Cluster).Build() r := &Reconciler{ - Client: fakeClient, - APIReader: fakeClient, - RuntimeClient: fakeRuntimeClient, - desiredStateEngine: topology.NewDesiredStateEngine(fakeClient, nil, fakeRuntimeClient), + Client: fakeClient, + APIReader: fakeClient, + RuntimeClient: fakeRuntimeClient, + desiredStateGenerator: desiredstate.NewGenerator(fakeClient, nil, fakeRuntimeClient), } err := r.callAfterClusterUpgrade(ctx, tt.s) diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go index 5fd965a69379..fbf61c368e42 100644 --- a/test/extension/handlers/topologymutation/handler_integration_test.go +++ b/test/extension/handlers/topologymutation/handler_integration_test.go @@ -51,8 +51,8 @@ import ( runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1" runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog" runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1" - "sigs.k8s.io/cluster-api/exp/util/topology" - "sigs.k8s.io/cluster-api/exp/util/topology/scope" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate" + "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope" "sigs.k8s.io/cluster-api/feature" "sigs.k8s.io/cluster-api/util/contract" "sigs.k8s.io/cluster-api/webhooks" @@ -98,8 +98,8 @@ func TestHandler(t *testing.T) { err = clusterClassReconciler.SetupWithManager(ctx, 
 	err = clusterClassReconciler.SetupWithManager(ctx, mgr, controller.Options{})
 	g.Expect(err).ToNot(HaveOccurred())
 
-	// Create a DesiredStateEngine.
-	desiredStateEngine := topology.NewDesiredStateEngine(nil, nil, runtimeClient)
+	// Create a desired state generator.
+	desiredStateGenerator := desiredstate.NewGenerator(nil, nil, runtimeClient)
 
 	// Note: as of today we don't have to set any fields and also don't have to call
 	// SetupWebhookWithManager because DefaultAndValidateVariables doesn't need any of that.
@@ -122,7 +122,7 @@ func TestHandler(t *testing.T) {
 	g.Expect(errs.ToAggregate()).ToNot(HaveOccurred())
 
 	// Return the desired state.
-	desiredState, err := desiredStateEngine.ComputeDesiredState(ctx, s)
+	desiredState, err := desiredStateGenerator.Generate(ctx, s)
 	g.Expect(err).ToNot(HaveOccurred())
 
 	dockerClusterImageRepository, found, err := unstructured.NestedString(desiredState.InfrastructureCluster.Object, "spec", "loadBalancer", "imageRepository")

From ca267eadbe7e3ca1189dfeebc22b72bba45f67bc Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Thu, 28 Mar 2024 13:01:18 +0100
Subject: [PATCH 08/10] Regenerate openapi.go

---
 .../api/v1alpha1/zz_generated.openapi.go | 501 ++++++++++++++++--
 1 file changed, 471 insertions(+), 30 deletions(-)

diff --git a/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go b/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go
index 76016618d974..27ffa4288242 100644
--- a/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go
+++ b/exp/runtime/hooks/api/v1alpha1/zz_generated.openapi.go
@@ -28,36 +28,48 @@ import (
 
 func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
 	return map[string]common.OpenAPIDefinition{
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterClusterUpgradeRequest": schema_runtime_hooks_api_v1alpha1_AfterClusterUpgradeRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterClusterUpgradeResponse": schema_runtime_hooks_api_v1alpha1_AfterClusterUpgradeResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneInitializedRequest": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneInitializedRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneInitializedResponse": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneInitializedResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneUpgradeRequest": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneUpgradeRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneUpgradeResponse": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneUpgradeResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterCreateRequest": schema_runtime_hooks_api_v1alpha1_BeforeClusterCreateRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterCreateResponse": schema_runtime_hooks_api_v1alpha1_BeforeClusterCreateResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterDeleteRequest": schema_runtime_hooks_api_v1alpha1_BeforeClusterDeleteRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterDeleteResponse": schema_runtime_hooks_api_v1alpha1_BeforeClusterDeleteResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterUpgradeRequest": schema_runtime_hooks_api_v1alpha1_BeforeClusterUpgradeRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterUpgradeResponse": schema_runtime_hooks_api_v1alpha1_BeforeClusterUpgradeResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.CommonRequest": schema_runtime_hooks_api_v1alpha1_CommonRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.CommonResponse": schema_runtime_hooks_api_v1alpha1_CommonResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.CommonRetryResponse": schema_runtime_hooks_api_v1alpha1_CommonRetryResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoverVariablesRequest": schema_runtime_hooks_api_v1alpha1_DiscoverVariablesRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoverVariablesResponse": schema_runtime_hooks_api_v1alpha1_DiscoverVariablesResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoveryRequest": schema_runtime_hooks_api_v1alpha1_DiscoveryRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoveryResponse": schema_runtime_hooks_api_v1alpha1_DiscoveryResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ExtensionHandler": schema_runtime_hooks_api_v1alpha1_ExtensionHandler(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesRequest": schema_runtime_hooks_api_v1alpha1_GeneratePatchesRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesRequestItem": schema_runtime_hooks_api_v1alpha1_GeneratePatchesRequestItem(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesResponse": schema_runtime_hooks_api_v1alpha1_GeneratePatchesResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesResponseItem": schema_runtime_hooks_api_v1alpha1_GeneratePatchesResponseItem(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GroupVersionHook": schema_runtime_hooks_api_v1alpha1_GroupVersionHook(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.HolderReference": schema_runtime_hooks_api_v1alpha1_HolderReference(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ValidateTopologyRequest": schema_runtime_hooks_api_v1alpha1_ValidateTopologyRequest(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ValidateTopologyRequestItem": schema_runtime_hooks_api_v1alpha1_ValidateTopologyRequestItem(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ValidateTopologyResponse": schema_runtime_hooks_api_v1alpha1_ValidateTopologyResponse(ref),
-		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.Variable": schema_runtime_hooks_api_v1alpha1_Variable(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterClusterUpgradeRequest": schema_runtime_hooks_api_v1alpha1_AfterClusterUpgradeRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterClusterUpgradeResponse": schema_runtime_hooks_api_v1alpha1_AfterClusterUpgradeResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneInitializedRequest": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneInitializedRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneInitializedResponse": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneInitializedResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneUpgradeRequest": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneUpgradeRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.AfterControlPlaneUpgradeResponse": schema_runtime_hooks_api_v1alpha1_AfterControlPlaneUpgradeResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterCreateRequest": schema_runtime_hooks_api_v1alpha1_BeforeClusterCreateRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterCreateResponse": schema_runtime_hooks_api_v1alpha1_BeforeClusterCreateResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterDeleteRequest": schema_runtime_hooks_api_v1alpha1_BeforeClusterDeleteRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterDeleteResponse": schema_runtime_hooks_api_v1alpha1_BeforeClusterDeleteResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterUpgradeRequest": schema_runtime_hooks_api_v1alpha1_BeforeClusterUpgradeRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.BeforeClusterUpgradeResponse": schema_runtime_hooks_api_v1alpha1_BeforeClusterUpgradeResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.Builtins": schema_runtime_hooks_api_v1alpha1_Builtins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterBuiltins": schema_runtime_hooks_api_v1alpha1_ClusterBuiltins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterNetworkBuiltins": schema_runtime_hooks_api_v1alpha1_ClusterNetworkBuiltins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterTopologyBuiltins": schema_runtime_hooks_api_v1alpha1_ClusterTopologyBuiltins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.CommonRequest": schema_runtime_hooks_api_v1alpha1_CommonRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.CommonResponse": schema_runtime_hooks_api_v1alpha1_CommonResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.CommonRetryResponse": schema_runtime_hooks_api_v1alpha1_CommonRetryResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneBuiltins": schema_runtime_hooks_api_v1alpha1_ControlPlaneBuiltins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneMachineTemplateBuiltins": schema_runtime_hooks_api_v1alpha1_ControlPlaneMachineTemplateBuiltins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneMachineTemplateInfrastructureRefBuiltins": schema_runtime_hooks_api_v1alpha1_ControlPlaneMachineTemplateInfrastructureRefBuiltins(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoverVariablesRequest": schema_runtime_hooks_api_v1alpha1_DiscoverVariablesRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoverVariablesResponse": schema_runtime_hooks_api_v1alpha1_DiscoverVariablesResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoveryRequest": schema_runtime_hooks_api_v1alpha1_DiscoveryRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.DiscoveryResponse": schema_runtime_hooks_api_v1alpha1_DiscoveryResponse(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ExtensionHandler": schema_runtime_hooks_api_v1alpha1_ExtensionHandler(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesRequest": schema_runtime_hooks_api_v1alpha1_GeneratePatchesRequest(ref),
+		"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesRequestItem": schema_runtime_hooks_api_v1alpha1_GeneratePatchesRequestItem(ref),
"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesResponse": schema_runtime_hooks_api_v1alpha1_GeneratePatchesResponse(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GeneratePatchesResponseItem": schema_runtime_hooks_api_v1alpha1_GeneratePatchesResponseItem(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.GroupVersionHook": schema_runtime_hooks_api_v1alpha1_GroupVersionHook(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.HolderReference": schema_runtime_hooks_api_v1alpha1_HolderReference(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapBuiltins": schema_runtime_hooks_api_v1alpha1_MachineBootstrapBuiltins(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapConfigRefBuiltins": schema_runtime_hooks_api_v1alpha1_MachineBootstrapConfigRefBuiltins(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineDeploymentBuiltins": schema_runtime_hooks_api_v1alpha1_MachineDeploymentBuiltins(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineInfrastructureRefBuiltins": schema_runtime_hooks_api_v1alpha1_MachineInfrastructureRefBuiltins(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachinePoolBuiltins": schema_runtime_hooks_api_v1alpha1_MachinePoolBuiltins(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ValidateTopologyRequest": schema_runtime_hooks_api_v1alpha1_ValidateTopologyRequest(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ValidateTopologyRequestItem": schema_runtime_hooks_api_v1alpha1_ValidateTopologyRequestItem(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ValidateTopologyResponse": schema_runtime_hooks_api_v1alpha1_ValidateTopologyResponse(ref), + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.Variable": schema_runtime_hooks_api_v1alpha1_Variable(ref), } } @@ -713,6 +725,166 @@ func schema_runtime_hooks_api_v1alpha1_BeforeClusterUpgradeResponse(ref common.R } } +func schema_runtime_hooks_api_v1alpha1_Builtins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "Builtins represents builtin variables exposed through patches.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "cluster": { + SchemaProps: spec.SchemaProps{ + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterBuiltins"), + }, + }, + "controlPlane": { + SchemaProps: spec.SchemaProps{ + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneBuiltins"), + }, + }, + "machineDeployment": { + SchemaProps: spec.SchemaProps{ + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineDeploymentBuiltins"), + }, + }, + "machinePool": { + SchemaProps: spec.SchemaProps{ + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachinePoolBuiltins"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterBuiltins", "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneBuiltins", "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineDeploymentBuiltins", "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachinePoolBuiltins"}, + } +} + +func schema_runtime_hooks_api_v1alpha1_ClusterBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: 
+			SchemaProps: spec.SchemaProps{
+				Description: "ClusterBuiltins represents builtin cluster variables.",
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"name": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Name is the name of the cluster.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"namespace": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Namespace is the namespace of the cluster.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"topology": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Topology represents the cluster topology variables.",
+							Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterTopologyBuiltins"),
+						},
+					},
+					"network": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Network represents the cluster network variables.",
+							Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterNetworkBuiltins"),
+						},
+					},
+				},
+			},
+		},
+		Dependencies: []string{
+			"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterNetworkBuiltins", "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ClusterTopologyBuiltins"},
+	}
+}
+
+func schema_runtime_hooks_api_v1alpha1_ClusterNetworkBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "ClusterNetworkBuiltins represents builtin cluster network variables.",
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"serviceDomain": {
+						SchemaProps: spec.SchemaProps{
+							Description: "ServiceDomain is the domain name for services.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"services": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Services is the network ranges from which service VIPs are allocated.",
+							Type: []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type: []string{"string"},
+										Format: "",
+									},
+								},
+							},
+						},
+					},
+					"pods": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Pods is the network ranges from which Pod networks are allocated.",
+							Type: []string{"array"},
+							Items: &spec.SchemaOrArray{
+								Schema: &spec.Schema{
+									SchemaProps: spec.SchemaProps{
+										Default: "",
+										Type: []string{"string"},
+										Format: "",
+									},
+								},
+							},
+						},
+					},
+					"ipFamily": {
+						SchemaProps: spec.SchemaProps{
+							Description: "IPFamily is the IPFamily the Cluster is operating in. One of Invalid, IPv4, IPv6, DualStack. Note: IPFamily is not a concept in Kubernetes. It was originally introduced in CAPI for CAPD. IPFamily may be dropped in a future release. More details at https://github.com/kubernetes-sigs/cluster-api/issues/7521",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func schema_runtime_hooks_api_v1alpha1_ClusterTopologyBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "ClusterTopologyBuiltins represents builtin cluster topology variables.",
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"version": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Version is the Kubernetes version of the Cluster. NOTE: Please note that this version might temporarily differ from the version of the ControlPlane or workers while an upgrade process is being orchestrated.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"class": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Class is the name of the ClusterClass of the Cluster.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
 func schema_runtime_hooks_api_v1alpha1_CommonRequest(ref common.ReferenceCallback) common.OpenAPIDefinition {
 	return common.OpenAPIDefinition{
 		Schema: spec.Schema{
@@ -812,6 +984,90 @@ func schema_runtime_hooks_api_v1alpha1_CommonRetryResponse(ref common.ReferenceC
 	}
 }
 
+func schema_runtime_hooks_api_v1alpha1_ControlPlaneBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "ControlPlaneBuiltins represents builtin ControlPlane variables. NOTE: These variables are only set for templates belonging to the ControlPlane object.",
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"version": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Version is the Kubernetes version of the ControlPlane object. NOTE: Please note that this version is the version we are currently reconciling towards. It can differ from the current version of the ControlPlane while an upgrade process is being orchestrated.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"name": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Name is the name of the ControlPlane, to which the current template belongs to.",
+							Type: []string{"string"},
+							Format: "",
+						},
+					},
+					"replicas": {
+						SchemaProps: spec.SchemaProps{
+							Description: "Replicas is the value of the replicas field of the ControlPlane object.",
+							Type: []string{"integer"},
+							Format: "int64",
+						},
+					},
+					"machineTemplate": {
+						SchemaProps: spec.SchemaProps{
+							Description: "MachineTemplate is the value of the .spec.machineTemplate field of the ControlPlane object.",
+							Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneMachineTemplateBuiltins"),
+						},
+					},
+				},
+			},
+		},
+		Dependencies: []string{
+			"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneMachineTemplateBuiltins"},
+	}
+}
+
+func schema_runtime_hooks_api_v1alpha1_ControlPlaneMachineTemplateBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
+				Description: "ControlPlaneMachineTemplateBuiltins is the value of the .spec.machineTemplate field of the ControlPlane object.",
+				Type: []string{"object"},
+				Properties: map[string]spec.Schema{
+					"infrastructureRef": {
+						SchemaProps: spec.SchemaProps{
+							Description: "InfrastructureRef is the value of the infrastructureRef field of ControlPlane.spec.machineTemplate.",
+							Default: map[string]interface{}{},
+							Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneMachineTemplateInfrastructureRefBuiltins"),
+						},
+					},
+				},
+			},
+		},
+		Dependencies: []string{
+			"sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.ControlPlaneMachineTemplateInfrastructureRefBuiltins"},
+	}
+}
+
+func schema_runtime_hooks_api_v1alpha1_ControlPlaneMachineTemplateInfrastructureRefBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition {
+	return common.OpenAPIDefinition{
+		Schema: spec.Schema{
+			SchemaProps: spec.SchemaProps{
"ControlPlaneMachineTemplateInfrastructureRefBuiltins is the value of the infrastructureRef field of ControlPlane.spec.machineTemplate.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the infrastructureRef.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + func schema_runtime_hooks_api_v1alpha1_DiscoverVariablesRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ @@ -1365,6 +1621,191 @@ func schema_runtime_hooks_api_v1alpha1_HolderReference(ref common.ReferenceCallb } } +func schema_runtime_hooks_api_v1alpha1_MachineBootstrapBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineBootstrapBuiltins is the value of the .spec.template.spec.bootstrap field of the MachineDeployment or MachinePool.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "configRef": { + SchemaProps: spec.SchemaProps{ + Description: "ConfigRef is the value of the .spec.template.spec.bootstrap.configRef field of the MachineDeployment.", + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapConfigRefBuiltins"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapConfigRefBuiltins"}, + } +} + +func schema_runtime_hooks_api_v1alpha1_MachineBootstrapConfigRefBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineBootstrapConfigRefBuiltins is the value of the .spec.template.spec.bootstrap.configRef field of the MachineDeployment or MachinePool.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the bootstrap.configRef.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_runtime_hooks_api_v1alpha1_MachineDeploymentBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineDeploymentBuiltins represents builtin MachineDeployment variables. NOTE: These variables are only set for templates belonging to a MachineDeployment.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "Version is the Kubernetes version of the MachineDeployment, to which the current template belongs to. NOTE: Please note that this version is the version we are currently reconciling towards. 
It can differ from the current version of the MachineDeployment machines while an upgrade process is being orchestrated.", + Type: []string{"string"}, + Format: "", + }, + }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "Class is the class name of the MachineDeployment, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the MachineDeployment, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, + "topologyName": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyName is the topology name of the MachineDeployment, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the value of the replicas field of the MachineDeployment, to which the current template belongs to.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "bootstrap": { + SchemaProps: spec.SchemaProps{ + Description: "Bootstrap is the value of the .spec.template.spec.bootstrap field of the MachineDeployment.", + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapBuiltins"), + }, + }, + "infrastructureRef": { + SchemaProps: spec.SchemaProps{ + Description: "InfrastructureRef is the value of the .spec.template.spec.infrastructureRef field of the MachineDeployment.", + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineInfrastructureRefBuiltins"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapBuiltins", "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineInfrastructureRefBuiltins"}, + } +} + +func schema_runtime_hooks_api_v1alpha1_MachineInfrastructureRefBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachineInfrastructureRefBuiltins is the value of the .spec.template.spec.infrastructureRef field of the MachineDeployment or MachinePool.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name of the infrastructureRef.", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, + } +} + +func schema_runtime_hooks_api_v1alpha1_MachinePoolBuiltins(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "MachinePoolBuiltins represents builtin MachinePool variables. NOTE: These variables are only set for templates belonging to a MachinePool.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "version": { + SchemaProps: spec.SchemaProps{ + Description: "Version is the Kubernetes version of the MachinePool, to which the current template belongs to. NOTE: Please note that this version is the version we are currently reconciling towards. 
It can differ from the current version of the MachinePool machines while an upgrade process is being orchestrated.", + Type: []string{"string"}, + Format: "", + }, + }, + "class": { + SchemaProps: spec.SchemaProps{ + Description: "Class is the class name of the MachinePool, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, + "name": { + SchemaProps: spec.SchemaProps{ + Description: "Name is the name of the MachinePool, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, + "topologyName": { + SchemaProps: spec.SchemaProps{ + Description: "TopologyName is the topology name of the MachinePool, to which the current template belongs to.", + Type: []string{"string"}, + Format: "", + }, + }, + "replicas": { + SchemaProps: spec.SchemaProps{ + Description: "Replicas is the value of the replicas field of the MachinePool, to which the current template belongs to.", + Type: []string{"integer"}, + Format: "int64", + }, + }, + "bootstrap": { + SchemaProps: spec.SchemaProps{ + Description: "Bootstrap is the value of the .spec.template.spec.bootstrap field of the MachinePool.", + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapBuiltins"), + }, + }, + "infrastructureRef": { + SchemaProps: spec.SchemaProps{ + Description: "InfrastructureRef is the value of the .spec.template.spec.infrastructureRef field of the MachinePool.", + Ref: ref("sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineInfrastructureRefBuiltins"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineBootstrapBuiltins", "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1.MachineInfrastructureRefBuiltins"}, + } +} + func schema_runtime_hooks_api_v1alpha1_ValidateTopologyRequest(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ From 6e4cbab100e08a93165f13f318f68b0073b77e93 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Thu, 28 Mar 2024 13:02:57 +0100 Subject: [PATCH 09/10] Move desired/scope to scope --- exp/topology/desiredstate/desired_state.go | 2 +- exp/topology/desiredstate/desired_state_test.go | 2 +- exp/topology/{desiredstate => }/scope/blueprint.go | 0 exp/topology/{desiredstate => }/scope/blueprint_test.go | 0 exp/topology/{desiredstate => }/scope/doc.go | 0 exp/topology/{desiredstate => }/scope/hookresponsetracker.go | 0 .../{desiredstate => }/scope/hookresponsetracker_test.go | 0 exp/topology/{desiredstate => }/scope/scope.go | 0 exp/topology/{desiredstate => }/scope/scope_test.go | 0 exp/topology/{desiredstate => }/scope/state.go | 0 exp/topology/{desiredstate => }/scope/state_test.go | 0 exp/topology/{desiredstate => }/scope/upgradetracker.go | 0 exp/topology/{desiredstate => }/scope/upgradetracker_test.go | 0 internal/controllers/topology/cluster/blueprint.go | 2 +- internal/controllers/topology/cluster/blueprint_test.go | 2 +- internal/controllers/topology/cluster/cluster_controller.go | 2 +- .../controllers/topology/cluster/cluster_controller_test.go | 2 +- internal/controllers/topology/cluster/conditions.go | 2 +- internal/controllers/topology/cluster/conditions_test.go | 2 +- internal/controllers/topology/cluster/current_state.go | 2 +- internal/controllers/topology/cluster/current_state_test.go | 2 +- internal/controllers/topology/cluster/patches/engine.go | 2 +- internal/controllers/topology/cluster/patches/engine_test.go | 2 +- 
 internal/controllers/topology/cluster/reconcile_state.go | 2 +-
 internal/controllers/topology/cluster/reconcile_state_test.go | 2 +-
 .../handlers/topologymutation/handler_integration_test.go | 2 +-
 26 files changed, 15 insertions(+), 15 deletions(-)
 rename exp/topology/{desiredstate => }/scope/blueprint.go (100%)
 rename exp/topology/{desiredstate => }/scope/blueprint_test.go (100%)
 rename exp/topology/{desiredstate => }/scope/doc.go (100%)
 rename exp/topology/{desiredstate => }/scope/hookresponsetracker.go (100%)
 rename exp/topology/{desiredstate => }/scope/hookresponsetracker_test.go (100%)
 rename exp/topology/{desiredstate => }/scope/scope.go (100%)
 rename exp/topology/{desiredstate => }/scope/scope_test.go (100%)
 rename exp/topology/{desiredstate => }/scope/state.go (100%)
 rename exp/topology/{desiredstate => }/scope/state_test.go (100%)
 rename exp/topology/{desiredstate => }/scope/upgradetracker.go (100%)
 rename exp/topology/{desiredstate => }/scope/upgradetracker_test.go (100%)

diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go
index dedd611b4552..0614607b9b40 100644
--- a/exp/topology/desiredstate/desired_state.go
+++ b/exp/topology/desiredstate/desired_state.go
@@ -35,7 +35,7 @@ import (
 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches"
diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go
index 504df993c62f..ff7c68ddebc9 100644
--- a/exp/topology/desiredstate/desired_state_test.go
+++ b/exp/topology/desiredstate/desired_state_test.go
@@ -40,7 +40,7 @@ import (
 runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/internal/hooks"
diff --git a/exp/topology/desiredstate/scope/blueprint.go b/exp/topology/scope/blueprint.go
similarity index 100%
rename from exp/topology/desiredstate/scope/blueprint.go
rename to exp/topology/scope/blueprint.go
diff --git a/exp/topology/desiredstate/scope/blueprint_test.go b/exp/topology/scope/blueprint_test.go
similarity index 100%
rename from exp/topology/desiredstate/scope/blueprint_test.go
rename to exp/topology/scope/blueprint_test.go
diff --git a/exp/topology/desiredstate/scope/doc.go b/exp/topology/scope/doc.go
similarity index 100%
rename from exp/topology/desiredstate/scope/doc.go
rename to exp/topology/scope/doc.go
diff --git a/exp/topology/desiredstate/scope/hookresponsetracker.go b/exp/topology/scope/hookresponsetracker.go
similarity index 100%
rename from exp/topology/desiredstate/scope/hookresponsetracker.go
rename to exp/topology/scope/hookresponsetracker.go
diff --git a/exp/topology/desiredstate/scope/hookresponsetracker_test.go b/exp/topology/scope/hookresponsetracker_test.go
similarity index 100%
rename from exp/topology/desiredstate/scope/hookresponsetracker_test.go
rename to exp/topology/scope/hookresponsetracker_test.go
diff --git a/exp/topology/desiredstate/scope/scope.go b/exp/topology/scope/scope.go
similarity index 100%
rename from exp/topology/desiredstate/scope/scope.go
rename to exp/topology/scope/scope.go
diff --git a/exp/topology/desiredstate/scope/scope_test.go b/exp/topology/scope/scope_test.go
similarity index 100%
rename from exp/topology/desiredstate/scope/scope_test.go
rename to exp/topology/scope/scope_test.go
diff --git a/exp/topology/desiredstate/scope/state.go b/exp/topology/scope/state.go
similarity index 100%
rename from exp/topology/desiredstate/scope/state.go
rename to exp/topology/scope/state.go
diff --git a/exp/topology/desiredstate/scope/state_test.go b/exp/topology/scope/state_test.go
similarity index 100%
rename from exp/topology/desiredstate/scope/state_test.go
rename to exp/topology/scope/state_test.go
diff --git a/exp/topology/desiredstate/scope/upgradetracker.go b/exp/topology/scope/upgradetracker.go
similarity index 100%
rename from exp/topology/desiredstate/scope/upgradetracker.go
rename to exp/topology/scope/upgradetracker.go
diff --git a/exp/topology/desiredstate/scope/upgradetracker_test.go b/exp/topology/scope/upgradetracker_test.go
similarity index 100%
rename from exp/topology/desiredstate/scope/upgradetracker_test.go
rename to exp/topology/scope/upgradetracker_test.go
diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go
index fb79c63e0de7..c618668573d8 100644
--- a/internal/controllers/topology/cluster/blueprint.go
+++ b/internal/controllers/topology/cluster/blueprint.go
@@ -22,7 +22,7 @@ import (
 "github.com/pkg/errors"
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 tlog "sigs.k8s.io/cluster-api/internal/log"
 )
diff --git a/internal/controllers/topology/cluster/blueprint_test.go b/internal/controllers/topology/cluster/blueprint_test.go
index 4fe59a4e04cf..0e05f3767fd4 100644
--- a/internal/controllers/topology/cluster/blueprint_test.go
+++ b/internal/controllers/topology/cluster/blueprint_test.go
@@ -28,7 +28,7 @@ import (
 . "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/internal/test/builder"
 )
diff --git a/internal/controllers/topology/cluster/cluster_controller.go b/internal/controllers/topology/cluster/cluster_controller.go
index f6720d68f6b6..9e9af3ce1d54 100644
--- a/internal/controllers/topology/cluster/cluster_controller.go
+++ b/internal/controllers/topology/cluster/cluster_controller.go
@@ -41,7 +41,7 @@ import (
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 "sigs.k8s.io/cluster-api/exp/topology/desiredstate"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 "sigs.k8s.io/cluster-api/internal/hooks"
diff --git a/internal/controllers/topology/cluster/cluster_controller_test.go b/internal/controllers/topology/cluster/cluster_controller_test.go
index 653f2455b57b..8d14c8592ae7 100644
--- a/internal/controllers/topology/cluster/cluster_controller_test.go
+++ b/internal/controllers/topology/cluster/cluster_controller_test.go
@@ -40,7 +40,7 @@ import (
 runtimev1 "sigs.k8s.io/cluster-api/exp/runtime/api/v1alpha1"
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/internal/hooks"
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go
index 2d53d0da1343..f9bafd9f968f 100644
--- a/internal/controllers/topology/cluster/conditions.go
+++ b/internal/controllers/topology/cluster/conditions.go
@@ -23,7 +23,7 @@ import (
 "github.com/pkg/errors"
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/util/conditions"
 )
diff --git a/internal/controllers/topology/cluster/conditions_test.go b/internal/controllers/topology/cluster/conditions_test.go
index 7673522ac652..d19f84e75275 100644
--- a/internal/controllers/topology/cluster/conditions_test.go
+++ b/internal/controllers/topology/cluster/conditions_test.go
@@ -30,7 +30,7 @@ import (
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/internal/test/builder"
 "sigs.k8s.io/cluster-api/util/conditions"
 )
diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go
index 646738e8e534..fb359b9af917 100644
--- a/internal/controllers/topology/cluster/current_state.go
+++ b/internal/controllers/topology/cluster/current_state.go
@@ -29,7 +29,7 @@ import (
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/internal/contract"
 tlog "sigs.k8s.io/cluster-api/internal/log"
 "sigs.k8s.io/cluster-api/util/labels"
diff --git a/internal/controllers/topology/cluster/current_state_test.go b/internal/controllers/topology/cluster/current_state_test.go
index 76f965194ec0..e8a29e17c018 100644
--- a/internal/controllers/topology/cluster/current_state_test.go
+++ b/internal/controllers/topology/cluster/current_state_test.go
@@ -30,7 +30,7 @@ import (
 . "sigs.k8s.io/controller-runtime/pkg/envtest/komega"
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/internal/test/builder"
 "sigs.k8s.io/cluster-api/internal/topology/selectors"
 )
diff --git a/internal/controllers/topology/cluster/patches/engine.go b/internal/controllers/topology/cluster/patches/engine.go
index d5ffb9dc9cde..0257bf824839 100644
--- a/internal/controllers/topology/cluster/patches/engine.go
+++ b/internal/controllers/topology/cluster/patches/engine.go
@@ -31,7 +31,7 @@ import (
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/patches/api"
diff --git a/internal/controllers/topology/cluster/patches/engine_test.go b/internal/controllers/topology/cluster/patches/engine_test.go
index 64ed95d47f47..0946a0fa9b2a 100644
--- a/internal/controllers/topology/cluster/patches/engine_test.go
+++ b/internal/controllers/topology/cluster/patches/engine_test.go
@@ -35,7 +35,7 @@ import (
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 fakeruntimeclient "sigs.k8s.io/cluster-api/internal/runtime/client/fake"
 "sigs.k8s.io/cluster-api/internal/test/builder"
diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go
index 43c6326a69f5..b6355d4a57a6 100644
--- a/internal/controllers/topology/cluster/reconcile_state.go
+++ b/internal/controllers/topology/cluster/reconcile_state.go
@@ -38,7 +38,7 @@ import (
 clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
 expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
diff --git a/internal/controllers/topology/cluster/reconcile_state_test.go b/internal/controllers/topology/cluster/reconcile_state_test.go
index 0d98edd40e77..bdd250f2c97b 100644
--- a/internal/controllers/topology/cluster/reconcile_state_test.go
+++ b/internal/controllers/topology/cluster/reconcile_state_test.go
@@ -45,7 +45,7 @@ import (
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 "sigs.k8s.io/cluster-api/exp/topology/desiredstate"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/internal/contract"
 "sigs.k8s.io/cluster-api/internal/controllers/topology/cluster/structuredmerge"
 "sigs.k8s.io/cluster-api/internal/hooks"
diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go
index fbf61c368e42..4e86297d82f4 100644
--- a/test/extension/handlers/topologymutation/handler_integration_test.go
+++ b/test/extension/handlers/topologymutation/handler_integration_test.go
@@ -52,7 +52,7 @@ import (
 runtimecatalog "sigs.k8s.io/cluster-api/exp/runtime/catalog"
 runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
 "sigs.k8s.io/cluster-api/exp/topology/desiredstate"
- "sigs.k8s.io/cluster-api/exp/topology/desiredstate/scope"
+ "sigs.k8s.io/cluster-api/exp/topology/scope"
 "sigs.k8s.io/cluster-api/feature"
 "sigs.k8s.io/cluster-api/util/contract"
 "sigs.k8s.io/cluster-api/webhooks"

From 347290fdad545e368fd823c2d5f7866cd123e3d0 Mon Sep 17 00:00:00 2001
From: Stefan Bueringer
Date: Thu, 28 Mar 2024 15:40:39 +0100
Subject: [PATCH 10/10] Address review comments

---
 exp/topology/desiredstate/desired_state.go | 48 +++++++++----------
 .../desiredstate/desired_state_test.go | 28 +++--------
 .../handler_integration_test.go | 1 -
 3 files changed, 31 insertions(+), 46 deletions(-)

diff --git a/exp/topology/desiredstate/desired_state.go b/exp/topology/desiredstate/desired_state.go
index 0614607b9b40..2da2cd26b435 100644
--- a/exp/topology/desiredstate/desired_state.go
+++ b/exp/topology/desiredstate/desired_state.go
@@ -82,7 +82,7 @@ type generator struct {
 // NOTE: We are assuming all the required objects are provided as input; also, in case of any error,
 // the entire compute operation will fail. This might be improved in the future if support for reconciling
 // a subset of a topology is implemented.
-func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
+func (g *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.ClusterState, error) {
 var err error
 desiredState := &scope.ClusterState{
 ControlPlane: &scope.ControlPlaneState{},
@@ -105,7 +105,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 // - Building the TopologyReconciled condition.
 // - Making upgrade decisions on the control plane.
 // - Making upgrade decisions on machine deployments.
- mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, e.Client)
+ mdUpgradingNames, err := s.Current.MachineDeployments.Upgrading(ctx, g.Client)
 if err != nil {
 return nil, errors.Wrap(err, "failed to check if any MachineDeployment is upgrading")
 }
@@ -117,7 +117,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 // - Making upgrade decisions on the control plane.
 // - Making upgrade decisions on machine pools.
 if len(s.Current.MachinePools) > 0 {
- client, err := e.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
+ client, err := g.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
 if err != nil {
 return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
 }
@@ -131,7 +131,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 // Compute the desired state of the ControlPlane object, if applicable adding a reference to the
 // InfrastructureMachineTemplate generated by the previous step.
- if desiredState.ControlPlane.Object, err = e.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
+ if desiredState.ControlPlane.Object, err = g.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
 return nil, errors.Wrapf(err, "failed to compute ControlPlane")
 }
@@ -156,7 +156,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 // If required, compute the desired state of the MachineDeployments from the list of MachineDeploymentTopologies
 // defined in the cluster.
 if s.Blueprint.HasMachineDeployments() {
- desiredState.MachineDeployments, err = e.computeMachineDeployments(ctx, s)
+ desiredState.MachineDeployments, err = g.computeMachineDeployments(ctx, s)
 if err != nil {
 return nil, errors.Wrapf(err, "failed to compute MachineDeployments")
 }
@@ -165,7 +165,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 // If required, compute the desired state of the MachinePools from the list of MachinePoolTopologies
 // defined in the cluster.
 if s.Blueprint.HasMachinePools() {
- desiredState.MachinePools, err = e.computeMachinePools(ctx, s)
+ desiredState.MachinePools, err = g.computeMachinePools(ctx, s)
 if err != nil {
 return nil, errors.Wrapf(err, "failed to compute MachinePools")
 }
@@ -177,7 +177,7 @@ func (e *generator) Generate(ctx context.Context, s *scope.Scope) (*scope.Cluste
 // are preserved during patching. When desired objects are computed, their spec is copied from a template; in some cases
 // further modifications to the spec are made afterwards. In those cases we have to make sure those fields are not overwritten
 // in apply patches. Some examples are .spec.machineTemplate and .spec.version in control planes.
- if err := e.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
+ if err := g.patchEngine.Apply(ctx, s.Blueprint, desiredState); err != nil {
 return nil, errors.Wrap(err, "failed to apply patches")
 }
@@ -249,7 +249,7 @@ func computeControlPlaneInfrastructureMachineTemplate(_ context.Context, s *scop
 // computeControlPlane computes the desired state for the ControlPlane object starting from the
 // corresponding template defined in the blueprint.
-func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
+func (g *generator) computeControlPlane(ctx context.Context, s *scope.Scope, infrastructureMachineTemplate *unstructured.Unstructured) (*unstructured.Unstructured, error) {
 template := s.Blueprint.ControlPlane.Template
 templateClonedFromRef := s.Blueprint.ClusterClass.Spec.ControlPlane.Ref
 cluster := s.Current.Cluster
@@ -384,7 +384,7 @@ func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
 }
 // Sets the desired Kubernetes version for the control plane.
- version, err := e.computeControlPlaneVersion(ctx, s)
+ version, err := g.computeControlPlaneVersion(ctx, s)
 if err != nil {
 return nil, errors.Wrap(err, "failed to compute version of control plane")
 }
@@ -398,7 +398,7 @@ func (e *generator) computeControlPlane(ctx context.Context, s *scope.Scope, inf
 // computeControlPlaneVersion calculates the version of the desired control plane.
 // The version is calculated using the state of the current machine deployments, the current control plane
 // and the version defined in the topology.
-func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
+func (g *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Scope) (string, error) {
 log := tlog.LoggerFrom(ctx)
 desiredVersion := s.Blueprint.Topology.Version
 // If we are creating the control plane object (current control plane is nil), use version from topology.
@@ -467,7 +467,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 KubernetesVersion: desiredVersion,
 }
 hookResponse := &runtimehooksv1.AfterControlPlaneUpgradeResponse{}
- if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+ if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.AfterControlPlaneUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
 return "", err
 }
 // Add the response to the tracker so we can later update condition or requeue when required.
@@ -479,7 +479,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 if hookResponse.RetryAfterSeconds != 0 {
 log.Infof("MachineDeployments/MachinePools upgrade to version %q is blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
 } else {
- if err := hooks.MarkAsDone(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
+ if err := hooks.MarkAsDone(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
 return "", err
 }
 }
@@ -520,7 +520,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 ToKubernetesVersion: desiredVersion,
 }
 hookResponse := &runtimehooksv1.BeforeClusterUpgradeResponse{}
- if err := e.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
+ if err := g.RuntimeClient.CallAllExtensions(ctx, runtimehooksv1.BeforeClusterUpgrade, s.Current.Cluster, hookRequest, hookResponse); err != nil {
 return "", err
 }
 // Add the response to the tracker so we can later update condition or requeue when required.
@@ -533,7 +533,7 @@ func (e *generator) computeControlPlaneVersion(ctx context.Context, s *scope.Sco
 // We are picking up the new version here.
 // Track the intent of calling the AfterControlPlaneUpgrade and the AfterClusterUpgrade hooks once we are done with the upgrade.
- if err := hooks.MarkAsPending(ctx, e.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
+ if err := hooks.MarkAsPending(ctx, g.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade, runtimehooksv1.AfterClusterUpgrade); err != nil {
 return "", err
 }
 }
@@ -605,10 +605,10 @@ func calculateRefDesiredAPIVersion(currentRef *corev1.ObjectReference, desiredRe
 }
 // computeMachineDeployments computes the desired state of the list of MachineDeployments.
-func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
+func (g *generator) computeMachineDeployments(ctx context.Context, s *scope.Scope) (scope.MachineDeploymentsStateMap, error) {
 machineDeploymentsStateMap := make(scope.MachineDeploymentsStateMap)
 for _, mdTopology := range s.Blueprint.Topology.Workers.MachineDeployments {
- desiredMachineDeployment, err := e.computeMachineDeployment(ctx, s, mdTopology)
+ desiredMachineDeployment, err := g.computeMachineDeployment(ctx, s, mdTopology)
 if err != nil {
 return nil, errors.Wrapf(err, "failed to compute MachineDeployment for topology %q", mdTopology.Name)
 }
@@ -620,7 +620,7 @@ func (e *generator) computeMachineDeployments(ctx context.Context, s *scope.Scop
 // computeMachineDeployment computes the desired state for a MachineDeploymentTopology.
 // The generated machineDeployment object is calculated using the values from the machineDeploymentTopology and
 // the machineDeployment class.
-func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
+func (g *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology) (*scope.MachineDeploymentState, error) {
 desiredMachineDeployment := &scope.MachineDeploymentState{}
 // Gets the blueprint for the MachineDeployment class.
@@ -699,7 +699,7 @@ func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
 // Add ClusterTopologyMachineDeploymentLabel to the generated InfrastructureMachine template
 infraMachineTemplateLabels[clusterv1.ClusterTopologyMachineDeploymentNameLabel] = machineDeploymentTopology.Name
 desiredMachineDeployment.InfrastructureMachineTemplate.SetLabels(infraMachineTemplateLabels)
- version := e.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
+ version := g.computeMachineDeploymentVersion(s, machineDeploymentTopology, currentMachineDeployment)
 // Compute values that can be set both in the MachineDeploymentClass and in the MachineDeploymentTopology
 minReadySeconds := machineDeploymentClass.MinReadySeconds
@@ -840,7 +840,7 @@ func (e *generator) computeMachineDeployment(ctx context.Context, s *scope.Scope
 // computeMachineDeploymentVersion calculates the version of the desired machine deployment.
 // The version is calculated using the state of the current machine deployments,
 // the current control plane and the version defined in the topology.
-func (e *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
+func (g *generator) computeMachineDeploymentVersion(s *scope.Scope, machineDeploymentTopology clusterv1.MachineDeploymentTopology, currentMDState *scope.MachineDeploymentState) string {
 desiredVersion := s.Blueprint.Topology.Version
 // If creating a new machine deployment, mark it as pending if the control plane is not
 // yet stable. Creating a new MD while the control plane is upgrading can lead to unexpected race conditions.
@@ -931,10 +931,10 @@ func isMachineDeploymentDeferred(clusterTopology *clusterv1.Topology, mdTopology
 }
 // computeMachinePools computes the desired state of the list of MachinePools.
-func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
+func (g *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (scope.MachinePoolsStateMap, error) {
 machinePoolsStateMap := make(scope.MachinePoolsStateMap)
 for _, mpTopology := range s.Blueprint.Topology.Workers.MachinePools {
- desiredMachinePool, err := e.computeMachinePool(ctx, s, mpTopology)
+ desiredMachinePool, err := g.computeMachinePool(ctx, s, mpTopology)
 if err != nil {
 return nil, errors.Wrapf(err, "failed to compute MachinePool for topology %q", mpTopology.Name)
 }
@@ -946,7 +946,7 @@ func (e *generator) computeMachinePools(ctx context.Context, s *scope.Scope) (sc
 // computeMachinePool computes the desired state for a MachinePoolTopology.
 // The generated machinePool object is calculated using the values from the machinePoolTopology and
 // the machinePool class.
-func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
+func (g *generator) computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology) (*scope.MachinePoolState, error) {
 desiredMachinePool := &scope.MachinePoolState{}
 // Gets the blueprint for the MachinePool class.
@@ -1025,7 +1025,7 @@ func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
 // Add ClusterTopologyMachinePoolLabel to the generated InfrastructureMachinePool object
 infraMachinePoolObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
 desiredMachinePool.InfrastructureMachinePoolObject.SetLabels(infraMachinePoolObjectLabels)
- version := e.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
+ version := g.computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)
 // Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
 minReadySeconds := machinePoolClass.MinReadySeconds
@@ -1142,7 +1142,7 @@ func (e *generator) computeMachinePool(_ context.Context, s *scope.Scope, machin
 // computeMachinePoolVersion calculates the version of the desired machine pool.
 // The version is calculated using the state of the current machine pools,
 // the current control plane and the version defined in the topology.
-func (e *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
+func (g *generator) computeMachinePoolVersion(s *scope.Scope, machinePoolTopology clusterv1.MachinePoolTopology, currentMPState *scope.MachinePoolState) string {
 desiredVersion := s.Blueprint.Topology.Version
 // If creating a new machine pool, mark it as pending if the control plane is not
 // yet stable. Creating a new MP while the control plane is upgrading can lead to unexpected race conditions.
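With the scope package promoted to exp/topology/scope, the desired state computation above can be driven from outside the topology controller, e.g. by tests or by other projects. The following is a minimal consumer sketch in Go; scope.New and the Blueprint field appear in the tests below, while the desiredstate.Generator interface name, the scope.ClusterBlueprint type, and the way the generator is obtained are assumptions that are not shown in these hunks:

// Hedged sketch: computing the desired state of a Cluster topology from
// outside the topology controller, using the relocated scope package.
package example

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/exp/topology/desiredstate"
	"sigs.k8s.io/cluster-api/exp/topology/scope"
)

// computeDesiredState builds a scope for the given Cluster and asks the
// generator for the desired state of the whole topology. The Generator
// interface name is an assumption; Generate's signature matches the
// method shown in the diff above.
func computeDesiredState(ctx context.Context, g desiredstate.Generator, cluster *clusterv1.Cluster, blueprint *scope.ClusterBlueprint) (*scope.ClusterState, error) {
	s := scope.New(cluster) // as used by the tests in this series
	s.Blueprint = blueprint // must describe the ClusterClass and its templates
	return g.Generate(ctx, s)
}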
diff --git a/exp/topology/desiredstate/desired_state_test.go b/exp/topology/desiredstate/desired_state_test.go
index ff7c68ddebc9..7ef777cdf1aa 100644
--- a/exp/topology/desiredstate/desired_state_test.go
+++ b/exp/topology/desiredstate/desired_state_test.go
@@ -338,9 +338,7 @@ func TestComputeControlPlane(t *testing.T) {
 scope := scope.New(cluster)
 scope.Blueprint = blueprint
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, scope, nil)
+ obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).ToNot(BeNil())
@@ -399,9 +397,7 @@ func TestComputeControlPlane(t *testing.T) {
 scope := scope.New(cluster)
 scope.Blueprint = blueprint
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, scope, nil)
+ obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).ToNot(BeNil())
@@ -429,9 +425,7 @@ func TestComputeControlPlane(t *testing.T) {
 scope := scope.New(clusterWithoutReplicas)
 scope.Blueprint = blueprint
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, scope, nil)
+ obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).ToNot(BeNil())
@@ -474,9 +468,7 @@ func TestComputeControlPlane(t *testing.T) {
 s.Blueprint = blueprint
 s.Current.ControlPlane = &scope.ControlPlaneState{}
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, s, infrastructureMachineTemplate)
+ obj, err := (&generator{}).computeControlPlane(ctx, s, infrastructureMachineTemplate)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).ToNot(BeNil())
@@ -535,9 +527,7 @@ func TestComputeControlPlane(t *testing.T) {
 scope := scope.New(clusterWithControlPlaneRef)
 scope.Blueprint = blueprint
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, scope, nil)
+ obj, err := (&generator{}).computeControlPlane(ctx, scope, nil)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).ToNot(BeNil())
@@ -605,9 +595,7 @@ func TestComputeControlPlane(t *testing.T) {
 Object: tt.currentControlPlane,
 }
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, s, nil)
+ obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).NotTo(BeNil())
 assertNestedField(g, obj, tt.expectedVersion, contract.ControlPlane().Version().Path()...)
@@ -645,9 +633,7 @@ func TestComputeControlPlane(t *testing.T) {
 s.Current.ControlPlane.Object.SetOwnerReferences([]metav1.OwnerReference{*ownerrefs.OwnerReferenceTo(shim, corev1.SchemeGroupVersion.WithKind("Secret"))})
 s.Blueprint = blueprint
- r := &generator{}
-
- obj, err := r.computeControlPlane(ctx, s, nil)
+ obj, err := (&generator{}).computeControlPlane(ctx, s, nil)
 g.Expect(err).ToNot(HaveOccurred())
 g.Expect(obj).ToNot(BeNil())
 g.Expect(ownerrefs.HasOwnerReferenceFrom(obj, shim)).To(BeTrue())
diff --git a/test/extension/handlers/topologymutation/handler_integration_test.go b/test/extension/handlers/topologymutation/handler_integration_test.go
index 4e86297d82f4..4a1391d8430d 100644
--- a/test/extension/handlers/topologymutation/handler_integration_test.go
+++ b/test/extension/handlers/topologymutation/handler_integration_test.go
@@ -223,7 +223,6 @@ func generateCRDForUnstructured(u *unstructured.Unstructured) *apiextensionsv1.C
 }
 // getScope gets blueprint (ClusterClass) and current state based on cluster and clusterClassFile.
-// Note: MachinePools have not been implemented as they are not supported by CAPV.
 func getScope(cluster *clusterv1.Cluster, clusterClassFile string) (*scope.Scope, error) {
 clusterClassYAML, err := os.ReadFile(clusterClassFile) //nolint:gosec // reading a file in tests is not a security issue.
 if err != nil {
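To close the loop with the builtin variable schemas generated earlier in this series: a topology mutation handler receives those values JSON-encoded under the "builtin" template variable. A minimal decoding sketch follows; the Builtins top-level type name and the map[string]apiextensionsv1.JSON variable shape mirror the exported variable helpers in this series, but should be treated as assumptions here:

// Hedged sketch: decoding the builtin variable in a GeneratePatches handler.
package example

import (
	"encoding/json"

	"github.com/pkg/errors"
	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"

	runtimehooksv1 "sigs.k8s.io/cluster-api/exp/runtime/hooks/api/v1alpha1"
)

// readBuiltins returns the decoded builtin variables. Per the generated
// schemas above, the ControlPlane, MachineDeployment and MachinePool
// builtins are only set for templates belonging to the respective object,
// so callers must nil-check those fields before use.
func readBuiltins(templateVariables map[string]apiextensionsv1.JSON) (*runtimehooksv1.Builtins, error) {
	jsonVar, ok := templateVariables["builtin"]
	if !ok {
		return nil, errors.New("builtin variable is not set")
	}
	builtins := &runtimehooksv1.Builtins{}
	if err := json.Unmarshal(jsonVar.Raw, builtins); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal builtin variable")
	}
	return builtins, nil
}

A handler would then, for example, read builtins.ControlPlane.Version (after a nil check) to render the version currently being reconciled towards into a ControlPlane template.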