diff --git a/internal/controllers/topology/cluster/blueprint.go b/internal/controllers/topology/cluster/blueprint.go
index eb5cde78a4b8..b57d5fae2475 100644
--- a/internal/controllers/topology/cluster/blueprint.go
+++ b/internal/controllers/topology/cluster/blueprint.go
@@ -80,7 +80,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
 			return nil, errors.Wrapf(err, "failed to get infrastructure machine template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
 		}
 
-		// Get the bootstrap machine template.
+		// Get the bootstrap config template.
 		machineDeploymentBlueprint.BootstrapTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Bootstrap.Ref)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to get bootstrap config template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
@@ -109,7 +109,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
 			return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
 		}
 
-		// Get the bootstrap config.
+		// Get the bootstrap config template.
 		machinePoolBlueprint.BootstrapTemplate, err = r.getReference(ctx, machinePoolClass.Template.Bootstrap.Ref)
 		if err != nil {
 			return nil, errors.Wrapf(err, "failed to get bootstrap config for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
diff --git a/internal/controllers/topology/cluster/conditions.go b/internal/controllers/topology/cluster/conditions.go
index 30390a5989cc..79ed85b31259 100644
--- a/internal/controllers/topology/cluster/conditions.go
+++ b/internal/controllers/topology/cluster/conditions.go
@@ -106,15 +106,18 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
 	}
 
 	// The topology is not considered as fully reconciled if one of the following is true:
-	// * either the Control Plane or any of the MachineDeployments are still pending to pick up the new version
+	// * either the Control Plane or any of the MachineDeployments/MachinePools are still pending to pick up the new version
 	//   (generally happens when upgrading the cluster)
-	// * when there are MachineDeployments for which the upgrade has been deferred
-	// * when new MachineDeployments are pending to be created
+	// * when there are MachineDeployments/MachinePools for which the upgrade has been deferred
+	// * when new MachineDeployments/MachinePools are pending to be created
 	//   (generally happens when upgrading the cluster)
 	if s.UpgradeTracker.ControlPlane.IsPendingUpgrade ||
 		s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() ||
 		s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() ||
-		s.UpgradeTracker.MachineDeployments.DeferredUpgrade() {
+		s.UpgradeTracker.MachineDeployments.DeferredUpgrade() ||
+		s.UpgradeTracker.MachinePools.IsAnyPendingCreate() ||
+		s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() ||
+		s.UpgradeTracker.MachinePools.DeferredUpgrade() {
 		msgBuilder := &strings.Builder{}
 		var reason string
@@ -180,6 +183,11 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
 			fmt.Fprintf(msgBuilder, " MachineDeployment(s) %s are upgrading",
 				computeNameList(s.UpgradeTracker.MachineDeployments.UpgradingNames()),
 			)
+
+		case len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0:
+			fmt.Fprintf(msgBuilder, " MachinePool(s) %s are upgrading",
upgrading", + computeNameList(s.UpgradeTracker.MachinePools.UpgradingNames()), + ) } conditions.Set( diff --git a/internal/controllers/topology/cluster/current_state.go b/internal/controllers/topology/cluster/current_state.go index 46c891149413..ecd52ef4ef96 100644 --- a/internal/controllers/topology/cluster/current_state.go +++ b/internal/controllers/topology/cluster/current_state.go @@ -289,6 +289,9 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa state := make(scope.MachinePoolsStateMap) // List all the machine pools in the current cluster and in a managed topology. + // Note: This is a cached list call. We ensure in reconcile_state that the cache is up-to-date + // after we create/update a MachinePool and we double-check if an MP already exists before + // we create it. mp := &expv1.MachinePoolList{} err := r.Client.List(ctx, mp, client.MatchingLabels{ @@ -328,7 +331,7 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa // Gets the infraRef. infraRef := &m.Spec.Template.Spec.InfrastructureRef if infraRef.Name == "" { - return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachineTemplate", tlog.KObj{Obj: m}) + return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachinePool", tlog.KObj{Obj: m}) } // If the mpTopology exists in the Cluster, lookup the corresponding mpBluePrint and align diff --git a/internal/controllers/topology/cluster/desired_state.go b/internal/controllers/topology/cluster/desired_state.go index fe8a7df64fb6..0aed86b5c517 100644 --- a/internal/controllers/topology/cluster/desired_state.go +++ b/internal/controllers/topology/cluster/desired_state.go @@ -84,7 +84,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (* if len(s.Current.MachinePools) > 0 { client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster)) if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading") } // Mark all the MachinePools that are currently upgrading. mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client) @@ -438,7 +438,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc // change the UpgradeTracker accordingly, otherwise the hook call is completed and we // can remove this hook from the list of pending-hooks. if hookResponse.RetryAfterSeconds != 0 { - log.Infof("MachineDeployments upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade)) + log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade)) } else { if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil { return "", err @@ -464,10 +464,11 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc } // If the control plane is not upgrading or scaling, we can assume the control plane is stable. - // However, we should also check for the MachineDeployments upgrading. - // If the MachineDeployments are upgrading, then do not pick up the desiredVersion yet. - // We will pick up the new version after the MachineDeployments finish upgrading. - if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 { + // However, we should also check for the MachineDeployments/MachinePools upgrading. 
+ // If the MachineDeployments/MachinePools are upgrading, then do not pick up the desiredVersion yet. + // We will pick up the new version after the MachineDeployments/MachinePools finish upgrading. + if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 || + len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0 { return *currentVersion, nil } @@ -950,7 +951,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c template: machinePoolBlueprint.BootstrapTemplate, templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapTemplate), cluster: s.Current.Cluster, - namePrefix: bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name), + namePrefix: bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name), currentObjectRef: currentBootstrapConfigRef, }) if err != nil { @@ -961,11 +962,11 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c if bootstrapObjectLabels == nil { bootstrapObjectLabels = map[string]string{} } - // Add ClusterTopologyMachinePoolLabel to the generated Bootstrap template + // Add ClusterTopologyMachinePoolLabel to the generated Bootstrap config bootstrapObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name desiredMachinePool.BootstrapObject.SetLabels(bootstrapObjectLabels) - // Compute the Infrastructure ref. + // Compute the InfrastructureMachinePool. var currentInfraMachinePoolRef *corev1.ObjectReference if currentMachinePool != nil && currentMachinePool.InfrastructureMachinePoolObject != nil { currentInfraMachinePoolRef = ¤tMachinePool.Object.Spec.Template.Spec.InfrastructureRef @@ -991,6 +992,11 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c version := computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool) // Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology + minReadySeconds := machinePoolClass.MinReadySeconds + if machinePoolTopology.MinReadySeconds != nil { + minReadySeconds = machinePoolTopology.MinReadySeconds + } + failureDomains := machinePoolClass.FailureDomains if machinePoolTopology.FailureDomains != nil { failureDomains = machinePoolTopology.FailureDomains @@ -1031,8 +1037,9 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c Namespace: s.Current.Cluster.Namespace, }, Spec: expv1.MachinePoolSpec{ - ClusterName: s.Current.Cluster.Name, - FailureDomains: failureDomains, + ClusterName: s.Current.Cluster.Name, + MinReadySeconds: minReadySeconds, + FailureDomains: failureDomains, Template: clusterv1.MachineTemplateSpec{ Spec: clusterv1.MachineSpec{ ClusterName: s.Current.Cluster.Name, diff --git a/internal/controllers/topology/cluster/patches/patch.go b/internal/controllers/topology/cluster/patches/patch.go index 29ce0fd56a65..c6a4b7811171 100644 --- a/internal/controllers/topology/cluster/patches/patch.go +++ b/internal/controllers/topology/cluster/patches/patch.go @@ -57,7 +57,7 @@ func (i PreserveFields) ApplyToHelper(opts *PatchOptions) { opts.preserveFields = i } -// patchObject overwrites spec in object with spec.template.spec of patchedTemplate, +// patchObject overwrites spec in object with spec.template.spec of modifiedObject, // while preserving the configured fields. 
// For example, ControlPlane.spec will be overwritten with the patched
// ControlPlaneTemplate.spec.template.spec but preserving spec.version and spec.replicas
@@ -66,7 +66,7 @@ func patchObject(ctx context.Context, object, modifiedObject *unstructured.Unstr
 	return patchUnstructured(ctx, object, modifiedObject, "spec.template.spec", "spec", opts...)
 }
 
-// patchTemplate overwrites spec.template.spec in template with spec.template.spec of patchedTemplate,
+// patchTemplate overwrites spec.template.spec in template with spec.template.spec of modifiedTemplate,
 // while preserving the configured fields.
 // For example, it's possible to patch BootstrapTemplate.spec.template.spec with a patched
 // BootstrapTemplate.spec.template.spec while preserving fields configured via opts.fieldsToPreserve.
diff --git a/internal/controllers/topology/cluster/patches/template.go b/internal/controllers/topology/cluster/patches/template.go
index 892058b7b054..55264cdfdd02 100644
--- a/internal/controllers/topology/cluster/patches/template.go
+++ b/internal/controllers/topology/cluster/patches/template.go
@@ -91,7 +91,7 @@ func (t *requestItemBuilder) Build() (*runtimehooksv1.GeneratePatchesRequestItem
 }
 
 // getTemplateAsUnstructured is a utility func that returns a template matching the holderKind, holderFieldPath
-// and mdTopologyName from a GeneratePatchesRequest.
+// and topologyNames from a GeneratePatchesRequest.
 func getTemplateAsUnstructured(req *runtimehooksv1.GeneratePatchesRequest, holderKind, holderFieldPath string, topologyNames requestTopologyName) (*unstructured.Unstructured, error) {
 	// Find the requestItem.
 	requestItem := getRequestItem(req, holderKind, holderFieldPath, topologyNames)
@@ -119,7 +119,7 @@ func getRequestItemByUID(req *runtimehooksv1.GeneratePatchesRequest, uid types.U
 	return nil
 }
 
-// getRequestItem is a utility func that returns a template matching the holderKind, holderFiledPath and mdTopologyName from a GeneratePatchesRequest.
+// getRequestItem is a utility func that returns a template matching the holderKind, holderFieldPath and topologyNames from a GeneratePatchesRequest.
 func getRequestItem(req *runtimehooksv1.GeneratePatchesRequest, holderKind, holderFieldPath string, topologyNames requestTopologyName) *runtimehooksv1.GeneratePatchesRequestItem {
 	for _, template := range req.Items {
 		if holderKind != "" && template.HolderReference.Kind != holderKind {
diff --git a/internal/controllers/topology/cluster/reconcile_state.go b/internal/controllers/topology/cluster/reconcile_state.go
index 6fc58cb21ba9..014523c88c3d 100644
--- a/internal/controllers/topology/cluster/reconcile_state.go
+++ b/internal/controllers/topology/cluster/reconcile_state.go
@@ -250,9 +250,9 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope
 	// Call the registered extensions for the hook after the cluster is fully upgraded.
// A clusters is considered fully upgraded if: // - Control plane is stable (not upgrading, not scaling, not about to upgrade) - // - MachineDeployments are not currently upgrading - // - MachineDeployments are not pending an upgrade - // - MachineDeployments are not pending create + // - MachineDeployments/MachinePools are not currently upgrading + // - MachineDeployments/MachinePools are not pending an upgrade + // - MachineDeployments/MachinePools are not pending create if isControlPlaneStable(s) && // Control Plane stable checks len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade !s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create @@ -737,6 +737,10 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope) // Create MachinePools. if len(diff.toCreate) > 0 { + // In current state we only got the MP list via a cached call. + // As a consequence, in order to prevent the creation of duplicate MP due to stale reads, + // we are now using a live client to double-check here that the MachinePool + // to be created doesn't exist yet. currentMPTopologyNames, err := r.getCurrentMachinePools(ctx, s) if err != nil { return err @@ -806,8 +810,6 @@ func (r *Reconciler) getCurrentMachinePools(ctx context.Context, s *scope.Scope) // createMachinePool creates a MachinePool and the corresponding templates. func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *scope.MachinePoolState) error { // Do not create the MachinePool if it is marked as pending create. - // This will also block MHC creation because creating the MHC without the corresponding - // MachinePool is unnecessary. mpTopologyName, ok := mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel] if !ok || mpTopologyName == "" { // Note: This is only an additional safety check and should not happen. The label will always be added when computing @@ -868,7 +870,7 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp * return nil } -// updateMachinePool updates a MachinePool. Also rotates the corresponding Templates if necessary. +// updateMachinePool updates a MachinePool. Also updates the corresponding objects if necessary. 
func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, currentMP, desiredMP *scope.MachinePoolState) error { log := tlog.LoggerFrom(ctx).WithMachinePool(desiredMP.Object) @@ -882,20 +884,18 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, curr cluster := s.Current.Cluster infraCtx, _ := log.WithObject(desiredMP.InfrastructureMachinePoolObject).Into(ctx) if err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{ - cluster: cluster, - current: currentMP.InfrastructureMachinePoolObject, - desired: desiredMP.InfrastructureMachinePoolObject, - versionGetter: contract.ControlPlane().Version().Get, + cluster: cluster, + current: currentMP.InfrastructureMachinePoolObject, + desired: desiredMP.InfrastructureMachinePoolObject, }); err != nil { return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object}) } bootstrapCtx, _ := log.WithObject(desiredMP.BootstrapObject).Into(ctx) if err := r.reconcileReferencedObject(bootstrapCtx, reconcileReferencedObjectInput{ - cluster: cluster, - current: currentMP.BootstrapObject, - desired: desiredMP.BootstrapObject, - versionGetter: contract.ControlPlane().Version().Get, + cluster: cluster, + current: currentMP.BootstrapObject, + desired: desiredMP.BootstrapObject, }); err != nil { return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object}) } diff --git a/internal/controllers/topology/cluster/scope/blueprint.go b/internal/controllers/topology/cluster/scope/blueprint.go index 1f93ba84258e..08e734505f80 100644 --- a/internal/controllers/topology/cluster/scope/blueprint.go +++ b/internal/controllers/topology/cluster/scope/blueprint.go @@ -87,7 +87,7 @@ type MachinePoolBlueprint struct { // BootstrapTemplate holds the bootstrap template for a MachinePool referenced from ClusterClass. BootstrapTemplate *unstructured.Unstructured - // InfrastructureMachinePoolTemplate holds the infrastructure machine template for a MachinePool referenced from ClusterClass. + // InfrastructureMachinePoolTemplate holds the infrastructure machine pool template for a MachinePool referenced from ClusterClass. 
InfrastructureMachinePoolTemplate *unstructured.Unstructured } diff --git a/internal/controllers/topology/cluster/scope/scope_test.go b/internal/controllers/topology/cluster/scope/scope_test.go index 2dd3a00cb85a..75db22d9c3bc 100644 --- a/internal/controllers/topology/cluster/scope/scope_test.go +++ b/internal/controllers/topology/cluster/scope/scope_test.go @@ -26,7 +26,7 @@ import ( ) func TestNew(t *testing.T) { - t.Run("should set the right maxMachineDeploymentUpgradeConcurrency in UpgradeTracker from the cluster annotation", func(t *testing.T) { + t.Run("should set the right maxUpgradeConcurrency in UpgradeTracker from the cluster annotation", func(t *testing.T) { tests := []struct { name string cluster *clusterv1.Cluster @@ -53,7 +53,8 @@ func TestNew(t *testing.T) { for _, tt := range tests { g := NewWithT(t) s := New(tt.cluster) - g.Expect(s.UpgradeTracker.MachineDeployments.maxMachineDeploymentUpgradeConcurrency).To(Equal(tt.want)) + g.Expect(s.UpgradeTracker.MachineDeployments.maxUpgradeConcurrency).To(Equal(tt.want)) + g.Expect(s.UpgradeTracker.MachinePools.maxUpgradeConcurrency).To(Equal(tt.want)) } }) } diff --git a/internal/controllers/topology/cluster/scope/state.go b/internal/controllers/topology/cluster/scope/state.go index 7b2ca82a12a5..23d6239cef5e 100644 --- a/internal/controllers/topology/cluster/scope/state.go +++ b/internal/controllers/topology/cluster/scope/state.go @@ -161,7 +161,7 @@ type MachinePoolState struct { } // IsUpgrading determines if the MachinePool is upgrading. -// A machine deployment is considered upgrading if at least one of the Machines of this +// A machine pool is considered upgrading if at least one of the Machines of this // MachinePool has a different version. func (mp *MachinePoolState) IsUpgrading(ctx context.Context, c client.Client) (bool, error) { // If the MachinePool has no version there is no definitive way to check if it is upgrading. Therefore, return false. diff --git a/internal/controllers/topology/cluster/scope/upgradetracker.go b/internal/controllers/topology/cluster/scope/upgradetracker.go index 9f7897d284fc..5249e7ba2a29 100644 --- a/internal/controllers/topology/cluster/scope/upgradetracker.go +++ b/internal/controllers/topology/cluster/scope/upgradetracker.go @@ -21,8 +21,8 @@ import "k8s.io/apimachinery/pkg/util/sets" // UpgradeTracker is a helper to capture the upgrade status and make upgrade decisions. type UpgradeTracker struct { ControlPlane ControlPlaneUpgradeTracker - MachineDeployments MachineDeploymentUpgradeTracker - MachinePools MachinePoolUpgradeTracker + MachineDeployments WorkerUpgradeTracker + MachinePools WorkerUpgradeTracker } // ControlPlaneUpgradeTracker holds the current upgrade status of the Control Plane. @@ -33,7 +33,7 @@ type ControlPlaneUpgradeTracker struct { // Example cases when IsPendingUpgrade is set to true: // - Upgrade is blocked by BeforeClusterUpgrade hook // - Upgrade is blocked because the current ControlPlane is not stable (provisioning OR scaling OR upgrading) - // - Upgrade is blocked because any of the current MachineDeployments are upgrading. + // - Upgrade is blocked because any of the current MachineDeployments or MachinePools are upgrading. IsPendingUpgrade bool // IsProvisioning is true if the current Control Plane is being provisioned for the first time. False otherwise. @@ -65,75 +65,40 @@ type ControlPlaneUpgradeTracker struct { IsScaling bool } -// MachineDeploymentUpgradeTracker holds the current upgrade status of MachineDeployments. 
-type MachineDeploymentUpgradeTracker struct { - // pendingCreateTopologyNames is the set of MachineDeployment topology names that are newly added to the +// WorkerUpgradeTracker holds the current upgrade status of MachineDeployments or MachinePools. +type WorkerUpgradeTracker struct { + // pendingCreateTopologyNames is the set of MachineDeployment/MachinePool topology names that are newly added to the // Cluster Topology but will not be created in the current reconcile loop. - // By marking a MachineDeployment topology as pendingCreate we skip creating the MachineDeployment. - // Nb. We use MachineDeployment topology names instead of MachineDeployment names because the new MachineDeployment - // names can keep changing for each reconcile loop leading to continuous updates to the TopologyReconciled condition. + // By marking a MachineDeployment/MachinePool topology as pendingCreate we skip creating the MachineDeployment/MachinePool. + // Nb. We use MachineDeployment/MachinePool topology names instead of MachineDeployment/MachinePool names because the new + // MachineDeployment/MachinePool names can keep changing for each reconcile loop leading to continuous updates to the + // TopologyReconciled condition. pendingCreateTopologyNames sets.Set[string] - // pendingUpgradeNames is the set of MachineDeployment names that are not going to pick up the new version + // pendingUpgradeNames is the set of MachineDeployment/MachinePool names that are not going to pick up the new version // in the current reconcile loop. - // By marking a MachineDeployment as pendingUpgrade we skip reconciling the MachineDeployment. + // By marking a MachineDeployment/MachinePool as pendingUpgrade we skip reconciling the MachineDeployment/MachinePool. pendingUpgradeNames sets.Set[string] - // deferredNames is the set of MachineDeployment names that are not going to pick up the new version + // deferredNames is the set of MachineDeployment/MachinePool names that are not going to pick up the new version // in the current reconcile loop because they are deferred by the user. - // Note: If a MachineDeployment is marked as deferred it should also be marked as pendingUpgrade. + // Note: If a MachineDeployment/MachinePool is marked as deferred it should also be marked as pendingUpgrade. deferredNames sets.Set[string] - // upgradingNames is the set of MachineDeployment names that are upgrading. This set contains the names of - // MachineDeployments that are currently upgrading and the names of MachineDeployments that will pick up the upgrade - // in the current reconcile loop. - // Note: This information is used to: - // - decide if ControlPlane can be upgraded. - // - calculate MachineDeployment upgrade concurrency. - // - update TopologyReconciled Condition. - // - decide if the AfterClusterUpgrade hook can be called. - upgradingNames sets.Set[string] - - // maxMachineDeploymentUpgradeConcurrency defines the maximum number of MachineDeployments that should be in an - // upgrading state. This includes the MachineDeployments that are currently upgrading and the MachineDeployments that - // will start the upgrade after the current reconcile loop. - maxMachineDeploymentUpgradeConcurrency int -} - -// MachinePoolUpgradeTracker holds the current upgrade status and makes upgrade -// decisions for MachinePools. -type MachinePoolUpgradeTracker struct { - // pendingCreateTopologyNames is the set of MachinePool topology names that are newly added to the - // Cluster Topology but will not be created in the current reconcile loop. 
- // By marking a MachinePool topology as pendingCreate we skip creating the MachinePool. - // Nb. We use MachinePool topology names instead of MachinePool names because the new MachinePool - // names can keep changing for each reconcile loop leading to continuous updates to the TopologyReconciled condition. - pendingCreateTopologyNames sets.Set[string] - - // pendingUpgradeNames is the set of MachinePool names that are not going to pick up the new version - // in the current reconcile loop. - // By marking a MachinePool as pendingUpgrade we skip reconciling the MachinePool. - pendingUpgradeNames sets.Set[string] - - // deferredNames is the set of MachinePool names that are not going to pick up the new version - // in the current reconcile loop because they are deferred by the user. - // Note: If a MachinePool is marked as deferred it should also be marked as pendingUpgrade. - deferredNames sets.Set[string] - - // upgradingNames is the set of MachinePool names that are upgrading. This set contains the names of - // MachinePools that are currently upgrading and the names of MachinePools that will pick up the upgrade - // in the current reconcile loop. + // upgradingNames is the set of MachineDeployment/MachinePool names that are upgrading. This set contains the names of + // MachineDeployments/MachinePools that are currently upgrading and the names of MachineDeployments/MachinePools that + // will pick up the upgrade in the current reconcile loop. // Note: This information is used to: // - decide if ControlPlane can be upgraded. - // - calculate MachinePool upgrade concurrency. + // - calculate MachineDeployment/MachinePool upgrade concurrency. // - update TopologyReconciled Condition. // - decide if the AfterClusterUpgrade hook can be called. upgradingNames sets.Set[string] - // maxMachinePoolUpgradeConcurrency defines the maximum number of MachinePools that should be in an - // upgrading state. This includes the MachinePools that are currently upgrading and the MachinePools that - // will start the upgrade after the current reconcile loop. - maxMachinePoolUpgradeConcurrency int + // maxUpgradeConcurrency defines the maximum number of MachineDeployments/MachinePools that should be in an + // upgrading state. This includes the MachineDeployments/MachinePools that are currently upgrading and the + // MachineDeployments/MachinePools that will start the upgrade after the current reconcile loop. + maxUpgradeConcurrency int } // UpgradeTrackerOptions contains the options for NewUpgradeTracker. 
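// Illustrative usage of the unified tracker (the topology names below are hypothetical;
// all methods are defined in this file). MachineDeployments and MachinePools now share
// the exact same bookkeeping, so callers drive both trackers identically:
//
//	ut := NewUpgradeTracker()
//	ut.MachinePools.MarkUpgrading("cluster1-mp-0")
//	if ut.MachinePools.UpgradeConcurrencyReached() {
//		// Over the limit: hold this MachinePool back until a later reconcile loop.
//		ut.MachinePools.MarkPendingUpgrade("cluster1-mp-1")
//	}
//	// The TopologyReconciled condition aggregates both worker trackers.
//	blocked := ut.MachineDeployments.IsAnyPendingUpgrade() || ut.MachinePools.IsAnyPendingUpgrade()
//	_ = blocked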
@@ -180,25 +145,25 @@ func NewUpgradeTracker(opts ...UpgradeTrackerOption) *UpgradeTracker {
 		options.maxMPUpgradeConcurrency = 1
 	}
 	return &UpgradeTracker{
-		MachineDeployments: MachineDeploymentUpgradeTracker{
-			pendingCreateTopologyNames:             sets.Set[string]{},
-			pendingUpgradeNames:                    sets.Set[string]{},
-			deferredNames:                          sets.Set[string]{},
-			upgradingNames:                         sets.Set[string]{},
-			maxMachineDeploymentUpgradeConcurrency: options.maxMDUpgradeConcurrency,
+		MachineDeployments: WorkerUpgradeTracker{
+			pendingCreateTopologyNames: sets.Set[string]{},
+			pendingUpgradeNames:        sets.Set[string]{},
+			deferredNames:              sets.Set[string]{},
+			upgradingNames:             sets.Set[string]{},
+			maxUpgradeConcurrency:      options.maxMDUpgradeConcurrency,
 		},
-		MachinePools: MachinePoolUpgradeTracker{
-			pendingCreateTopologyNames:       sets.Set[string]{},
-			pendingUpgradeNames:              sets.Set[string]{},
-			deferredNames:                    sets.Set[string]{},
-			upgradingNames:                   sets.Set[string]{},
-			maxMachinePoolUpgradeConcurrency: options.maxMPUpgradeConcurrency,
+		MachinePools: WorkerUpgradeTracker{
+			pendingCreateTopologyNames: sets.Set[string]{},
+			pendingUpgradeNames:        sets.Set[string]{},
+			deferredNames:              sets.Set[string]{},
+			upgradingNames:             sets.Set[string]{},
+			maxUpgradeConcurrency:      options.maxMPUpgradeConcurrency,
 		},
 	}
 }
 
-// MarkUpgrading marks a MachineDeployment as currently upgrading or about to upgrade.
-func (m *MachineDeploymentUpgradeTracker) MarkUpgrading(names ...string) {
+// MarkUpgrading marks a MachineDeployment/MachinePool as currently upgrading or about to upgrade.
+func (m *WorkerUpgradeTracker) MarkUpgrading(names ...string) {
 	for _, name := range names {
 		m.upgradingNames.Insert(name)
 	}
@@ -206,161 +171,77 @@ func (m *MachineDeploymentUpgradeTracker) MarkUpgrading(names ...string) {
 
 // UpgradingNames returns the list of machine deployments that are upgrading or
 // are about to upgrade.
-func (m *MachineDeploymentUpgradeTracker) UpgradingNames() []string {
+func (m *WorkerUpgradeTracker) UpgradingNames() []string {
 	return sets.List(m.upgradingNames)
 }
 
-// UpgradeConcurrencyReached returns true if the number of MachineDeployments upgrading is at the concurrency limit.
-func (m *MachineDeploymentUpgradeTracker) UpgradeConcurrencyReached() bool {
-	return m.upgradingNames.Len() >= m.maxMachineDeploymentUpgradeConcurrency
+// UpgradeConcurrencyReached returns true if the number of MachineDeployments/MachinePools upgrading is at the concurrency limit.
+func (m *WorkerUpgradeTracker) UpgradeConcurrencyReached() bool {
+	return m.upgradingNames.Len() >= m.maxUpgradeConcurrency
 }
 
 // MarkPendingCreate marks a machine deployment topology that is pending to be created.
 // This is generally used to capture machine deployments that are yet to be created
 // because the control plane is not yet stable.
-func (m *MachineDeploymentUpgradeTracker) MarkPendingCreate(mdTopologyName string) {
+func (m *WorkerUpgradeTracker) MarkPendingCreate(mdTopologyName string) {
 	m.pendingCreateTopologyNames.Insert(mdTopologyName)
 }
 
-// IsPendingCreate returns true is the MachineDeployment topology is marked as pending create.
-func (m *MachineDeploymentUpgradeTracker) IsPendingCreate(mdTopologyName string) bool {
+// IsPendingCreate returns true if the MachineDeployment/MachinePool topology is marked as pending create.
+func (m *WorkerUpgradeTracker) IsPendingCreate(mdTopologyName string) bool {
 	return m.pendingCreateTopologyNames.Has(mdTopologyName)
 }
 
 // IsAnyPendingCreate returns true if any of the machine deployments are pending
 // to be created. Returns false, otherwise.
-func (m *MachineDeploymentUpgradeTracker) IsAnyPendingCreate() bool {
+func (m *WorkerUpgradeTracker) IsAnyPendingCreate() bool {
 	return len(m.pendingCreateTopologyNames) != 0
 }
 
 // PendingCreateTopologyNames returns the list of machine deployment topology names that
 // are pending create.
-func (m *MachineDeploymentUpgradeTracker) PendingCreateTopologyNames() []string {
+func (m *WorkerUpgradeTracker) PendingCreateTopologyNames() []string {
 	return sets.List(m.pendingCreateTopologyNames)
 }
 
 // MarkPendingUpgrade marks a machine deployment as in need of an upgrade.
 // This is generally used to capture machine deployments that have not yet
 // picked up the topology version.
-func (m *MachineDeploymentUpgradeTracker) MarkPendingUpgrade(name string) {
+func (m *WorkerUpgradeTracker) MarkPendingUpgrade(name string) {
 	m.pendingUpgradeNames.Insert(name)
 }
 
-// IsPendingUpgrade returns true is the MachineDeployment marked as pending upgrade.
-func (m *MachineDeploymentUpgradeTracker) IsPendingUpgrade(name string) bool {
+// IsPendingUpgrade returns true if the MachineDeployment/MachinePool is marked as pending upgrade.
+func (m *WorkerUpgradeTracker) IsPendingUpgrade(name string) bool {
 	return m.pendingUpgradeNames.Has(name)
 }
 
 // IsAnyPendingUpgrade returns true if any of the machine deployments are pending
 // an upgrade. Returns false, otherwise.
-func (m *MachineDeploymentUpgradeTracker) IsAnyPendingUpgrade() bool {
+func (m *WorkerUpgradeTracker) IsAnyPendingUpgrade() bool {
 	return len(m.pendingUpgradeNames) != 0
 }
 
 // PendingUpgradeNames returns the list of machine deployment names that
 // are pending an upgrade.
-func (m *MachineDeploymentUpgradeTracker) PendingUpgradeNames() []string {
-	return sets.List(m.pendingUpgradeNames)
-}
-
-// MarkDeferredUpgrade marks that the upgrade for a MachineDeployment
-// has been deferred.
-func (m *MachineDeploymentUpgradeTracker) MarkDeferredUpgrade(name string) {
-	m.deferredNames.Insert(name)
-}
-
-// DeferredUpgradeNames returns the list of MachineDeployment names for
-// which the upgrade has been deferred.
-func (m *MachineDeploymentUpgradeTracker) DeferredUpgradeNames() []string {
-	return sets.List(m.deferredNames)
-}
-
-// DeferredUpgrade returns true if the upgrade has been deferred for any of the
-// MachineDeployments. Returns false, otherwise.
-func (m *MachineDeploymentUpgradeTracker) DeferredUpgrade() bool {
-	return len(m.deferredNames) != 0
-}
-
-// MarkUpgrading marks a MachinePool as currently upgrading or about to upgrade.
-func (m *MachinePoolUpgradeTracker) MarkUpgrading(names ...string) {
-	for _, name := range names {
-		m.upgradingNames.Insert(name)
-	}
-}
-
-// UpgradingNames returns the list of machine pools that are upgrading or
-// are about to upgrade.
-func (m *MachinePoolUpgradeTracker) UpgradingNames() []string {
-	return sets.List(m.upgradingNames)
-}
-
-// UpgradeConcurrencyReached returns true if the number of MachinePools upgrading is at the concurrency limit.
-func (m *MachinePoolUpgradeTracker) UpgradeConcurrencyReached() bool {
-	return m.upgradingNames.Len() >= m.maxMachinePoolUpgradeConcurrency
-}
-
-// MarkPendingCreate marks a machine pool topology that is pending to be created.
-// This is generally used to capture machine pools that are yet to be created
-// because the control plane is not yet stable.
-func (m *MachinePoolUpgradeTracker) MarkPendingCreate(mdTopologyName string) { - m.pendingCreateTopologyNames.Insert(mdTopologyName) -} - -// IsPendingCreate returns true is the MachinePool topology is marked as pending create. -func (m *MachinePoolUpgradeTracker) IsPendingCreate(mdTopologyName string) bool { - return m.pendingCreateTopologyNames.Has(mdTopologyName) -} - -// IsAnyPendingCreate returns true if any of the machine pools are pending -// to be created. Returns false, otherwise. -func (m *MachinePoolUpgradeTracker) IsAnyPendingCreate() bool { - return len(m.pendingCreateTopologyNames) != 0 -} - -// PendingCreateTopologyNames returns the list of machine pool topology names that -// are pending create. -func (m *MachinePoolUpgradeTracker) PendingCreateTopologyNames() []string { - return sets.List(m.pendingCreateTopologyNames) -} - -// MarkPendingUpgrade marks a machine pool as in need of an upgrade. -// This is generally used to capture machine pools that have not yet -// picked up the topology version. -func (m *MachinePoolUpgradeTracker) MarkPendingUpgrade(name string) { - m.pendingUpgradeNames.Insert(name) -} - -// IsPendingUpgrade returns true is the MachinePool marked as pending upgrade. -func (m *MachinePoolUpgradeTracker) IsPendingUpgrade(name string) bool { - return m.pendingUpgradeNames.Has(name) -} - -// IsAnyPendingUpgrade returns true if any of the machine pools are pending -// an upgrade. Returns false, otherwise. -func (m *MachinePoolUpgradeTracker) IsAnyPendingUpgrade() bool { - return len(m.pendingUpgradeNames) != 0 -} - -// PendingUpgradeNames returns the list of machine pool names that -// are pending an upgrade. -func (m *MachinePoolUpgradeTracker) PendingUpgradeNames() []string { +func (m *WorkerUpgradeTracker) PendingUpgradeNames() []string { return sets.List(m.pendingUpgradeNames) } -// MarkDeferredUpgrade marks that the upgrade for a MachinePool +// MarkDeferredUpgrade marks that the upgrade for a MachineDeployment/MachinePool // has been deferred. -func (m *MachinePoolUpgradeTracker) MarkDeferredUpgrade(name string) { +func (m *WorkerUpgradeTracker) MarkDeferredUpgrade(name string) { m.deferredNames.Insert(name) } -// DeferredUpgradeNames returns the list of MachinePool names for +// DeferredUpgradeNames returns the list of MachineDeployment/MachinePool names for // which the upgrade has been deferred. -func (m *MachinePoolUpgradeTracker) DeferredUpgradeNames() []string { +func (m *WorkerUpgradeTracker) DeferredUpgradeNames() []string { return sets.List(m.deferredNames) } // DeferredUpgrade returns true if the upgrade has been deferred for any of the -// MachinePools. Returns false, otherwise. -func (m *MachinePoolUpgradeTracker) DeferredUpgrade() bool { +// MachineDeployments/MachinePools. Returns false, otherwise. 
+func (m *WorkerUpgradeTracker) DeferredUpgrade() bool { return len(m.deferredNames) != 0 } diff --git a/internal/controllers/topology/cluster/scope/upgradetracker_test.go b/internal/controllers/topology/cluster/scope/upgradetracker_test.go index 49dfcb490c65..0bb13dc88a08 100644 --- a/internal/controllers/topology/cluster/scope/upgradetracker_test.go +++ b/internal/controllers/topology/cluster/scope/upgradetracker_test.go @@ -23,7 +23,7 @@ import ( ) func TestNewUpgradeTracker(t *testing.T) { - t.Run("should set the correct value for maxMachineDeploymentUpgradeConcurrency", func(t *testing.T) { + t.Run("should set the correct value for maxUpgradeConcurrency", func(t *testing.T) { tests := []struct { name string options []UpgradeTrackerOption @@ -49,7 +49,7 @@ func TestNewUpgradeTracker(t *testing.T) { for _, tt := range tests { g := NewWithT(t) got := NewUpgradeTracker(tt.options...) - g.Expect(got.MachineDeployments.maxMachineDeploymentUpgradeConcurrency).To(Equal(tt.want)) + g.Expect(got.MachineDeployments.maxUpgradeConcurrency).To(Equal(tt.want)) } }) } diff --git a/internal/controllers/topology/cluster/util.go b/internal/controllers/topology/cluster/util.go index 29eed8494b2d..5fa6711c1393 100644 --- a/internal/controllers/topology/cluster/util.go +++ b/internal/controllers/topology/cluster/util.go @@ -37,6 +37,11 @@ func infrastructureMachineTemplateNamePrefix(clusterName, machineDeploymentTopol return fmt.Sprintf("%s-%s-", clusterName, machineDeploymentTopologyName) } +// bootstrapConfigNamePrefix calculates the name prefix for a BootstrapConfig. +func bootstrapConfigNamePrefix(clusterName, machinePoolTopologyName string) string { + return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName) +} + // infrastructureMachinePoolNamePrefix calculates the name prefix for a InfrastructureMachinePool. func infrastructureMachinePoolNamePrefix(clusterName, machinePoolTopologyName string) string { return fmt.Sprintf("%s-%s-", clusterName, machinePoolTopologyName) diff --git a/internal/topology/check/compatibility.go b/internal/topology/check/compatibility.go index f57a56fb75b0..c253db58a90c 100644 --- a/internal/topology/check/compatibility.go +++ b/internal/topology/check/compatibility.go @@ -196,7 +196,8 @@ func LocalObjectTemplateIsValid(template *clusterv1.LocalObjectTemplate, namespa // 1) InfrastructureCluster Templates are compatible. // 2) ControlPlane Templates are compatible. // 3) ControlPlane InfrastructureMachineTemplates are compatible. -// 4) MachineDeploymentClasses have not been deleted and are compatible. +// 4) MachineDeploymentClasses are compatible. +// 5) MachinePoolClasses are compatible. func ClusterClassesAreCompatible(current, desired *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList if current == nil { @@ -221,13 +222,14 @@ func ClusterClassesAreCompatible(current, desired *clusterv1.ClusterClass) field // Validate changes to MachineDeployments. allErrs = append(allErrs, MachineDeploymentClassesAreCompatible(current, desired)...) + // Validate changes to MachinePools. + allErrs = append(allErrs, MachinePoolClassesAreCompatible(current, desired)...) + return allErrs } // MachineDeploymentClassesAreCompatible checks if each MachineDeploymentClass in the new ClusterClass is a compatible change from the previous ClusterClass. -// It checks if: -// 1) Any MachineDeploymentClass has been removed. -// 2) If the MachineDeploymentClass.Template.Infrastructure reference has changed its Group or Kind. 
+// It checks if the MachineDeploymentClass.Template.Infrastructure reference has changed its Group or Kind. func MachineDeploymentClassesAreCompatible(current, desired *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -267,6 +269,28 @@ func MachineDeploymentClassesAreUnique(clusterClass *clusterv1.ClusterClass) fie return allErrs } +// MachinePoolClassesAreCompatible checks if each MachinePoolClass in the new ClusterClass is a compatible change from the previous ClusterClass. +// It checks if the MachinePoolClass.Template.Infrastructure reference has changed its Group or Kind. +func MachinePoolClassesAreCompatible(current, desired *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + + // Ensure previous MachinePool class was modified in a compatible way. + for _, class := range desired.Spec.Workers.MachinePools { + for i, oldClass := range current.Spec.Workers.MachinePools { + if class.Class == oldClass.Class { + // NOTE: class.Template.Metadata and class.Template.Bootstrap are allowed to change; + + // class.Template.Bootstrap is ensured syntactically correct by LocalObjectTemplateIsValid. + + // Validates class.Template.Infrastructure template changes in a compatible way + allErrs = append(allErrs, LocalObjectTemplatesAreCompatible(oldClass.Template.Infrastructure, class.Template.Infrastructure, + field.NewPath("spec", "workers", "machinePools").Index(i))...) + } + } + } + return allErrs +} + // MachinePoolClassesAreUnique checks that no two MachinePoolClasses in a ClusterClass share a name. func MachinePoolClassesAreUnique(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -297,7 +321,7 @@ func MachineDeploymentTopologiesAreValidAndDefinedInClusterClass(desired *cluste return nil } // MachineDeployment clusterClass must be defined in the ClusterClass. - machineDeploymentClasses := classNamesFromWorkerClass(clusterClass.Spec.Workers) + machineDeploymentClasses := mdClassNamesFromWorkerClass(clusterClass.Spec.Workers) names := sets.Set[string]{} for i, md := range desired.Spec.Topology.Workers.MachineDeployments { if errs := validation.IsValidLabelValue(md.Name); len(errs) != 0 { @@ -350,6 +374,70 @@ func MachineDeploymentTopologiesAreValidAndDefinedInClusterClass(desired *cluste return allErrs } +// MachinePoolTopologiesAreValidAndDefinedInClusterClass checks that each MachinePoolTopology name is not empty +// and unique, and each class in use is defined in ClusterClass.spec.Workers.MachinePools. +func MachinePoolTopologiesAreValidAndDefinedInClusterClass(desired *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) field.ErrorList { + var allErrs field.ErrorList + if desired.Spec.Topology.Workers == nil { + return nil + } + if len(desired.Spec.Topology.Workers.MachinePools) == 0 { + return nil + } + // MachinePool clusterClass must be defined in the ClusterClass. 
+ machinePoolClasses := mpClassNamesFromWorkerClass(clusterClass.Spec.Workers) + names := sets.Set[string]{} + for i, mp := range desired.Spec.Topology.Workers.MachinePools { + if errs := validation.IsValidLabelValue(mp.Name); len(errs) != 0 { + for _, err := range errs { + allErrs = append( + allErrs, + field.Invalid( + field.NewPath("spec", "topology", "workers", "machinePools").Index(i).Child("name"), + mp.Name, + fmt.Sprintf("must be a valid label value %s", err), + ), + ) + } + } + + if !machinePoolClasses.Has(mp.Class) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "topology", "workers", "machinePools").Index(i).Child("class"), + mp.Class, + fmt.Sprintf("MachinePoolClass with name %q does not exist in ClusterClass %q", + mp.Class, clusterClass.Name), + ), + ) + } + + // MachinePoolTopology name should not be empty. + if mp.Name == "" { + allErrs = append( + allErrs, + field.Required( + field.NewPath("spec", "topology", "workers", "machinePools").Index(i).Child("name"), + "name must not be empty", + ), + ) + continue + } + + if names.Has(mp.Name) { + allErrs = append(allErrs, + field.Invalid( + field.NewPath("spec", "topology", "workers", "machinePools").Index(i).Child("name"), + mp.Name, + fmt.Sprintf("name must be unique. MachinePool with name %q is defined more than once", mp.Name), + ), + ) + } + names.Insert(mp.Name) + } + return allErrs +} + // ClusterClassReferencesAreValid checks that each template reference in the ClusterClass is valid . func ClusterClassReferencesAreValid(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -368,14 +456,31 @@ func ClusterClassReferencesAreValid(clusterClass *clusterv1.ClusterClass) field. allErrs = append(allErrs, LocalObjectTemplateIsValid(&mdc.Template.Infrastructure, clusterClass.Namespace, field.NewPath("spec", "workers", "machineDeployments").Index(i).Child("template", "infrastructure"))...) } + + for i, mpc := range clusterClass.Spec.Workers.MachinePools { + allErrs = append(allErrs, LocalObjectTemplateIsValid(&mpc.Template.Bootstrap, clusterClass.Namespace, + field.NewPath("spec", "workers", "machinePools").Index(i).Child("template", "bootstrap"))...) + allErrs = append(allErrs, LocalObjectTemplateIsValid(&mpc.Template.Infrastructure, clusterClass.Namespace, + field.NewPath("spec", "workers", "machinePools").Index(i).Child("template", "infrastructure"))...) + } + return allErrs } -// classNames returns the set of MachineDeployment class names. -func classNamesFromWorkerClass(w clusterv1.WorkersClass) sets.Set[string] { +// mdClassNamesFromWorkerClass returns the set of MachineDeployment class names. +func mdClassNamesFromWorkerClass(w clusterv1.WorkersClass) sets.Set[string] { classes := sets.Set[string]{} for _, class := range w.MachineDeployments { classes.Insert(class.Class) } return classes } + +// mpClassNamesFromWorkerClass returns the set of MachinePool class names. 
+func mpClassNamesFromWorkerClass(w clusterv1.WorkersClass) sets.Set[string] { + classes := sets.Set[string]{} + for _, class := range w.MachinePools { + classes.Insert(class.Class) + } + return classes +} diff --git a/internal/webhooks/cluster.go b/internal/webhooks/cluster.go index b9dbfde380a5..85e350ccf8f2 100644 --- a/internal/webhooks/cluster.go +++ b/internal/webhooks/cluster.go @@ -490,7 +490,7 @@ func validateCIDRBlocks(fldPath *field.Path, cidrs []string) field.ErrorList { return allErrs } -// DefaultAndValidateVariables defaults and validates variables in the Cluster and MachineDeployment topologies based +// DefaultAndValidateVariables defaults and validates variables in the Cluster and MachineDeployment/MachinePool topologies based // on the definitions in the ClusterClass. func DefaultAndValidateVariables(cluster *clusterv1.Cluster, clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList @@ -580,6 +580,8 @@ func ValidateClusterForClusterClass(cluster *clusterv1.Cluster, clusterClass *cl } allErrs = append(allErrs, check.MachineDeploymentTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) + allErrs = append(allErrs, check.MachinePoolTopologiesAreValidAndDefinedInClusterClass(cluster, clusterClass)...) + // Validate the MachineHealthChecks defined in the cluster topology. allErrs = append(allErrs, validateMachineHealthChecks(cluster, clusterClass)...) return allErrs diff --git a/internal/webhooks/clusterclass.go b/internal/webhooks/clusterclass.go index f1368c041a3a..78549f43966a 100644 --- a/internal/webhooks/clusterclass.go +++ b/internal/webhooks/clusterclass.go @@ -302,7 +302,8 @@ func (webhook *ClusterClass) validateRemovedMachinePoolClassesAreNotUsed(cluster for _, c := range clusters { for _, machinePoolTopology := range c.Spec.Topology.Workers.MachinePools { if removedClasses.Has(machinePoolTopology.Class) { - // TODO(killianmuldoon): Same as above for MachineDeployments + // TODO(killianmuldoon): Improve error printing here so large scale changes don't flood the error log e.g. deduplication, only example usages given. + // TODO: consider if we get the index of the MachinePoolClass being deleted allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "workers", "machinePools"), fmt.Sprintf("MachinePoolClass %q cannot be deleted because it is used by Cluster %q", machinePoolTopology.Class, c.Name), @@ -429,13 +430,11 @@ func validateMachineHealthCheckClass(fldPath *field.Path, namepace string, m *cl func validateClusterClassMetadata(clusterClass *clusterv1.ClusterClass) field.ErrorList { var allErrs field.ErrorList allErrs = append(allErrs, clusterClass.Spec.ControlPlane.Metadata.Validate(field.NewPath("spec", "controlPlane", "metadata"))...) - workerPath := field.NewPath("spec", "workers", "machineDeployments") for idx, m := range clusterClass.Spec.Workers.MachineDeployments { - allErrs = append(allErrs, validateMachineDeploymentMetadata(m, workerPath.Index(idx))...) + allErrs = append(allErrs, m.Template.Metadata.Validate(field.NewPath("spec", "workers", "machineDeployments").Index(idx).Child("template", "metadata"))...) + } + for idx, m := range clusterClass.Spec.Workers.MachinePools { + allErrs = append(allErrs, m.Template.Metadata.Validate(field.NewPath("spec", "workers", "machinePools").Index(idx).Child("template", "metadata"))...) 
} return allErrs } - -func validateMachineDeploymentMetadata(m clusterv1.MachineDeploymentClass, fldPath *field.Path) field.ErrorList { - return m.Template.Metadata.Validate(fldPath.Child("template", "metadata")) -} diff --git a/internal/webhooks/patch_validation.go b/internal/webhooks/patch_validation.go index 5fe57c1136b7..f7e2166ee458 100644 --- a/internal/webhooks/patch_validation.go +++ b/internal/webhooks/patch_validation.go @@ -211,6 +211,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste err := validateSelectorName(name, path, "machineDeploymentClass", i) if err != nil { allErrs = append(allErrs, err) + break } for _, md := range class.Spec.Workers.MachineDeployments { var matches bool @@ -246,6 +247,7 @@ func validateSelectors(selector clusterv1.PatchSelector, class *clusterv1.Cluste err := validateSelectorName(name, path, "machinePoolClass", i) if err != nil { allErrs = append(allErrs, err) + break } for _, mp := range class.Spec.Workers.MachinePools { var matches bool diff --git a/test/extension/handlers/topologymutation/handler.go b/test/extension/handlers/topologymutation/handler.go index a364d763713c..3807e2eba832 100644 --- a/test/extension/handlers/topologymutation/handler.go +++ b/test/extension/handlers/topologymutation/handler.go @@ -217,7 +217,7 @@ func patchKubeadmConfigTemplate(ctx context.Context, k *bootstrapv1.KubeadmConfi log := ctrl.LoggerFrom(ctx) // Only patch the customImage if this DockerMachineTemplate belongs to a MachineDeployment or MachinePool with class "default-class" - // NOTE: This works by checking the existence of a builtin variable that exists only for templates liked to MachineDeployments. + // NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to MachineDeployments. mdClass, mdFound, err := topologymutation.GetStringVariable(templateVariables, "builtin.machineDeployment.class") if err != nil { return errors.Wrap(err, "could not set cgroup-driver to KubeadmConfigTemplate template kubeletExtraArgs") @@ -231,7 +231,7 @@ func patchKubeadmConfigTemplate(ctx context.Context, k *bootstrapv1.KubeadmConfi // This is a required variable. Return an error if it's not found. // NOTE: this should never happen because it is enforced by the patch engine. if !mdFound && !mpFound { - return errors.New("could not set cgroup-driver to KubeadmConfigTemplate template kubeletExtraArgs: variable \"builtin.machineDeployment.class\" not found") + return errors.New("could not set cgroup-driver to KubeadmConfigTemplate template kubeletExtraArgs: could find neither \"builtin.machineDeployment.class\" nor \"builtin.machinePool.class\" variable") } if mdClass == "default-worker" { @@ -272,7 +272,7 @@ func patchKubeadmConfigTemplate(ctx context.Context, k *bootstrapv1.KubeadmConfi // If the Kubernetes version from builtin.machinePool.version is below 1.24.0 set "cgroup-driver": "cgroupDriverCgroupfs" to // - InitConfiguration.KubeletExtraArgs // - JoinConfiguration.KubeletExtraArgs - // NOTE: machinePool version might be different than Cluster.version or other machinePool's versions; + // NOTE: MachinePool version might be different than Cluster.version or other MachinePool's versions; // the builtin variables provides the right version to use. 
mpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.machinePool.version") if err != nil { @@ -316,7 +316,7 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr // If the DockerMachineTemplate belongs to the ControlPlane, set the images using the ControlPlane version. // NOTE: ControlPlane version might be different than Cluster.version or MachineDeployment's versions; // the builtin variables provides the right version to use. - // NOTE: This works by checking the existence of a builtin variable that exists only for templates liked to the ControlPlane. + // NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to the ControlPlane. cpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version") if err != nil { return errors.Wrap(err, "could not set customImage to control plane dockerMachineTemplate") @@ -335,9 +335,9 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr } // If the DockerMachineTemplate belongs to a MachineDeployment, set the images the MachineDeployment version. - // NOTE: MachineDeployment version might be different than Cluster.version or other MachineDeployment's versions; + // NOTE: MachineDeployment version might be different from Cluster.version or other MachineDeployment's versions; // the builtin variables provides the right version to use. - // NOTE: This works by checking the existence of a built in variable that exists only for templates liked to MachineDeployments. + // NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to MachineDeployments. mdVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.machineDeployment.version") if err != nil { return errors.Wrap(err, "could not set customImage to MachineDeployment DockerMachineTemplate") @@ -354,7 +354,7 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr return nil } - // If the Docker Machine didn't have variables for either a control plane or a machineDeployment return an error. + // If the DockerMachineTemplate didn't have variables for either a control plane or a machineDeployment return an error. // NOTE: this should never happen because it is enforced by the patch engine. return errors.New("no version variables found for DockerMachineTemplate patch") } @@ -366,31 +366,10 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error { log := ctrl.LoggerFrom(ctx) - // If the DockerMachinePoolTemplate belongs to the ControlPlane, set the images using the ControlPlane version. - // NOTE: ControlPlane version might be different than Cluster.version or MachinePool's versions; - // the builtin variables provides the right version to use. - // NOTE: This works by checking the existence of a builtin variable that exists only for templates liked to the ControlPlane. 
- cpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version") - if err != nil { - return errors.Wrap(err, "could not set customImage to control plane dockerMachinePoolTemplate") - } - if found { - semVer, err := version.ParseMajorMinorPatchTolerant(cpVersion) - if err != nil { - return errors.Wrap(err, "could not parse control plane version") - } - kindMapping := kind.GetMapping(semVer, "") - - log.Info(fmt.Sprintf("Setting MachinePool custom image to %q", kindMapping.Image)) - dockerMachinePoolTemplate.Spec.Template.Spec.Template.CustomImage = kindMapping.Image - // return early if we have successfully patched a control plane dockerMachineTemplate - return nil - } - // If the DockerMachinePoolTemplate belongs to a MachinePool, set the images the MachinePool version. - // NOTE: MachinePool version might be different than Cluster.version or other MachinePool's versions; + // NOTE: MachinePool version might be different from Cluster.version or other MachinePool's versions; // the builtin variables provides the right version to use. - // NOTE: This works by checking the existence of a built in variable that exists only for templates liked to MachinePools. + // NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to MachinePools. mpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.machinePool.version") if err != nil { return errors.Wrap(err, "could not set customImage to MachinePool DockerMachinePoolTemplate") @@ -407,7 +386,7 @@ func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTempla return nil } - // If the Docker Machine didn't have variables for either a control plane or a machinePool return an error. + // If the DockerMachinePoolTemplate didn't have variables for a machinePool return an error. // NOTE: this should never happen because it is enforced by the patch engine. return errors.New("no version variables found for DockerMachinePoolTemplate patch") } diff --git a/test/infrastructure/docker/config/crd/kustomization.yaml b/test/infrastructure/docker/config/crd/kustomization.yaml index 3bcde1a5c866..517188bc269d 100644 --- a/test/infrastructure/docker/config/crd/kustomization.yaml +++ b/test/infrastructure/docker/config/crd/kustomization.yaml @@ -25,6 +25,7 @@ patchesStrategicMerge: - patches/webhook_in_dockermachinetemplates.yaml - patches/webhook_in_dockerclusters.yaml - patches/webhook_in_dockerclustertemplates.yaml + - patches/webhook_in_dockermachinepooltemplates.yaml # +kubebuilder:scaffold:crdkustomizewebhookpatch # [CERTMANAGER] To enable webhook, uncomment all the sections with [CERTMANAGER] prefix. # patches here are for enabling the CA injection for each CRD @@ -33,6 +34,7 @@ patchesStrategicMerge: - patches/cainjection_in_dockermachinetemplates.yaml - patches/cainjection_in_dockerclusters.yaml - patches/cainjection_in_dockerclustertemplates.yaml + - patches/cainjection_in_dockermachinepooltemplates.yaml # +kubebuilder:scaffold:crdkustomizecainjectionpatch # the following config is for teaching kustomize how to do kustomization for CRDs. 
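// Note on the new webhook patch below: caBundle is set to "Cg==" as a placeholder. A
// quick, self-contained Go check (illustrative only) confirms this is just a base64-encoded
// newline, enough to satisfy the apiserver's non-blank validation until cert-manager
// injects the real CA via the cainjection annotation:
//
//	package main
//
//	import (
//		"encoding/base64"
//		"fmt"
//	)
//
//	func main() {
//		b, err := base64.StdEncoding.DecodeString("Cg==")
//		if err != nil {
//			panic(err)
//		}
//		fmt.Printf("%q\n", b) // prints "\n": a single newline placeholder
//	}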
diff --git a/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepooltemplates.yaml new file mode 100644 index 000000000000..41f08eddb7ba --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/cainjection_in_dockermachinepooltemplates.yaml @@ -0,0 +1,8 @@ +# The following patch adds a directive for certmanager to inject CA into the CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + cert-manager.io/inject-ca-from: $(CERTIFICATE_NAMESPACE)/$(CERTIFICATE_NAME) + name: dockermachinepooltemplates.infrastructure.cluster.x-k8s.io diff --git a/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepooltemplates.yaml b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepooltemplates.yaml new file mode 100644 index 000000000000..7bc2cde83d79 --- /dev/null +++ b/test/infrastructure/docker/config/crd/patches/webhook_in_dockermachinepooltemplates.yaml @@ -0,0 +1,19 @@ +# The following patch enables conversion webhook for CRD +# CRD conversion requires k8s 1.13 or later. +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: dockermachinepooltemplates.infrastructure.cluster.x-k8s.io +spec: + conversion: + strategy: Webhook + webhook: + conversionReviewVersions: ["v1", "v1beta1"] + clientConfig: + # this is "\n" used as a placeholder, otherwise it will be rejected by the apiserver for being blank, + # but we're going to set it later using the cert-manager (or potentially a patch if not using cert-manager) + caBundle: Cg== + service: + namespace: system + name: webhook-service + path: /convert diff --git a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go index b4f16926739e..d08bdf4300a4 100644 --- a/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go +++ b/test/infrastructure/docker/exp/api/v1beta1/dockermachinepooltemplate_types.go @@ -50,7 +50,7 @@ type DockerMachinePoolTemplateList struct { } func init() { - objectTypes = append(objectTypes, &DockerMachinePool{}, &DockerMachinePoolList{}) + objectTypes = append(objectTypes, &DockerMachinePoolTemplate{}, &DockerMachinePoolTemplateList{}) } // DockerMachinePoolTemplateResource describes the data needed to create a DockerMachine from a template.
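// Note on the final hunk above: each API type file in this package appends its types to a
// shared objectTypes slice from init(), and that slice is registered with the scheme in a
// single place. Before the fix, init() re-registered DockerMachinePool instead of the new
// template types, leaving DockerMachinePoolTemplate unknown to the scheme, so typed clients
// could not encode or decode the new CRD. A minimal sketch of the registration pattern (the
// addKnownTypes wiring below is an assumption about the package's groupversion_info.go,
// shown for illustration only):
//
//	var objectTypes []runtime.Object
//
//	func init() {
//		// Appending the wrong type here is exactly the bug the hunk fixes.
//		objectTypes = append(objectTypes, &DockerMachinePoolTemplate{}, &DockerMachinePoolTemplateList{})
//	}
//
//	func addKnownTypes(scheme *runtime.Scheme) error {
//		scheme.AddKnownTypes(GroupVersion, objectTypes...)
//		return nil
//	}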