Minor fixes for CC+MP implementation
Signed-off-by: Stefan Büringer buringerst@vmware.com
sbueringer committed Aug 28, 2023
1 parent 4867dad commit e292b46
Showing 22 changed files with 285 additions and 264 deletions.
4 changes: 2 additions & 2 deletions internal/controllers/topology/cluster/blueprint.go
@@ -80,7 +80,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
return nil, errors.Wrapf(err, "failed to get infrastructure machine template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)
}

- // Get the bootstrap machine template.
+ // Get the bootstrap config template.
machineDeploymentBlueprint.BootstrapTemplate, err = r.getReference(ctx, machineDeploymentClass.Template.Bootstrap.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get bootstrap config template for %s, MachineDeployment class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machineDeploymentClass.Class)

@@ -109,7 +109,7 @@ func (r *Reconciler) getBlueprint(ctx context.Context, cluster *clusterv1.Cluste
return nil, errors.Wrapf(err, "failed to get InfrastructureMachinePoolTemplate for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
}

- // Get the bootstrap config.
+ // Get the bootstrap config template.
machinePoolBlueprint.BootstrapTemplate, err = r.getReference(ctx, machinePoolClass.Template.Bootstrap.Ref)
if err != nil {
return nil, errors.Wrapf(err, "failed to get bootstrap config for %s, MachinePool class %q", tlog.KObj{Obj: blueprint.ClusterClass}, machinePoolClass.Class)
16 changes: 12 additions & 4 deletions internal/controllers/topology/cluster/conditions.go
@@ -106,15 +106,18 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
}

// The topology is not considered as fully reconciled if one of the following is true:
- // * either the Control Plane or any of the MachineDeployments are still pending to pick up the new version
+ // * either the Control Plane or any of the MachineDeployments/MachinePools are still pending to pick up the new version
// (generally happens when upgrading the cluster)
- // * when there are MachineDeployments for which the upgrade has been deferred
- // * when new MachineDeployments are pending to be created
+ // * when there are MachineDeployments/MachinePools for which the upgrade has been deferred
+ // * when new MachineDeployments/MachinePools are pending to be created
// (generally happens when upgrading the cluster)
if s.UpgradeTracker.ControlPlane.IsPendingUpgrade ||
s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() ||
s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() ||
- s.UpgradeTracker.MachineDeployments.DeferredUpgrade() {
+ s.UpgradeTracker.MachineDeployments.DeferredUpgrade() ||
+ s.UpgradeTracker.MachinePools.IsAnyPendingCreate() ||
+ s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() ||
+ s.UpgradeTracker.MachinePools.DeferredUpgrade() {
msgBuilder := &strings.Builder{}
var reason string

@@ -180,6 +183,11 @@ func (r *Reconciler) reconcileTopologyReconciledCondition(s *scope.Scope, cluste
fmt.Fprintf(msgBuilder, " MachineDeployment(s) %s are upgrading",
computeNameList(s.UpgradeTracker.MachineDeployments.UpgradingNames()),
)
+
+ case len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0:
+ fmt.Fprintf(msgBuilder, " MachinePool(s) %s are upgrading",
+ computeNameList(s.UpgradeTracker.MachinePools.UpgradingNames()),
+ )
}

conditions.Set(
5 changes: 4 additions & 1 deletion internal/controllers/topology/cluster/current_state.go
@@ -289,6 +289,9 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa
state := make(scope.MachinePoolsStateMap)

// List all the machine pools in the current cluster and in a managed topology.
+ // Note: This is a cached list call. We ensure in reconcile_state that the cache is up-to-date
+ // after we create/update a MachinePool and we double-check if an MP already exists before
+ // we create it.
mp := &expv1.MachinePoolList{}
err := r.Client.List(ctx, mp,
client.MatchingLabels{

@@ -328,7 +331,7 @@ func (r *Reconciler) getCurrentMachinePoolState(ctx context.Context, blueprintMa
// Gets the infraRef.
infraRef := &m.Spec.Template.Spec.InfrastructureRef
if infraRef.Name == "" {
- return nil, fmt.Errorf("%s does not have a reference to a InfrastructureMachineTemplate", tlog.KObj{Obj: m})
+ return nil, fmt.Errorf("%s does not have a reference to an InfrastructureMachinePool", tlog.KObj{Obj: m})
}

// If the mpTopology exists in the Cluster, lookup the corresponding mpBluePrint and align
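The note added above is one half of a stale-cache defense: getCurrentMachinePoolState reads MachinePools from the controller cache, and reconcile_state.go (further down) re-checks against the API server before creating one. A minimal sketch of the pattern, assuming a cached client plus an uncached reader — the helper name and wiring are illustrative, not part of the commit:

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// createIfMissing double-checks with a live (uncached) reader that a
// MachinePool really doesn't exist before creating it, so a stale cached
// list can't cause a duplicate create.
func createIfMissing(ctx context.Context, cached client.Client, live client.Reader, desired *expv1.MachinePool) error {
	current := &expv1.MachinePool{}
	err := live.Get(ctx, client.ObjectKeyFromObject(desired), current)
	if err == nil {
		// Already exists server-side (possibly not yet visible in the cache).
		return nil
	}
	if !apierrors.IsNotFound(err) {
		return err
	}
	return cached.Create(ctx, desired)
}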
29 changes: 18 additions & 11 deletions internal/controllers/topology/cluster/desired_state.go
@@ -84,7 +84,7 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*
if len(s.Current.MachinePools) > 0 {
client, err := r.Tracker.GetClient(ctx, client.ObjectKeyFromObject(s.Current.Cluster))
if err != nil {
- return nil, err
+ return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
}
// Mark all the MachinePools that are currently upgrading.
mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, client)

@@ -438,7 +438,7 @@ func (r *Reconciler) computeControlPlaneVersion(ctx context.Context, s *scope.Sc
// change the UpgradeTracker accordingly, otherwise the hook call is completed and we
// can remove this hook from the list of pending-hooks.
if hookResponse.RetryAfterSeconds != 0 {
- log.Infof("MachineDeployments upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
+ log.Infof("MachineDeployments/MachinePools upgrade to version %q are blocked by %q hook", desiredVersion, runtimecatalog.HookName(runtimehooksv1.AfterControlPlaneUpgrade))
} else {
if err := hooks.MarkAsDone(ctx, r.Client, s.Current.Cluster, runtimehooksv1.AfterControlPlaneUpgrade); err != nil {
return "", err

@@ -464,10 +464,11 @@
}

// If the control plane is not upgrading or scaling, we can assume the control plane is stable.
- // However, we should also check for the MachineDeployments upgrading.
- // If the MachineDeployments are upgrading, then do not pick up the desiredVersion yet.
- // We will pick up the new version after the MachineDeployments finish upgrading.
- if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 {
+ // However, we should also check for the MachineDeployments/MachinePools upgrading.
+ // If the MachineDeployments/MachinePools are upgrading, then do not pick up the desiredVersion yet.
+ // We will pick up the new version after the MachineDeployments/MachinePools finish upgrading.
+ if len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) > 0 ||
+ len(s.UpgradeTracker.MachinePools.UpgradingNames()) > 0 {
return *currentVersion, nil
}

@@ -950,7 +951,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
template: machinePoolBlueprint.BootstrapTemplate,
templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapTemplate),
cluster: s.Current.Cluster,
- namePrefix: bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
+ namePrefix: bootstrapConfigNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
currentObjectRef: currentBootstrapConfigRef,
})
if err != nil {

@@ -961,11 +962,11 @@
if bootstrapObjectLabels == nil {
bootstrapObjectLabels = map[string]string{}
}
- // Add ClusterTopologyMachinePoolLabel to the generated Bootstrap template
+ // Add ClusterTopologyMachinePoolLabel to the generated Bootstrap config
bootstrapObjectLabels[clusterv1.ClusterTopologyMachinePoolNameLabel] = machinePoolTopology.Name
desiredMachinePool.BootstrapObject.SetLabels(bootstrapObjectLabels)

- // Compute the Infrastructure ref.
+ // Compute the InfrastructureMachinePool.
var currentInfraMachinePoolRef *corev1.ObjectReference
if currentMachinePool != nil && currentMachinePool.InfrastructureMachinePoolObject != nil {
currentInfraMachinePoolRef = &currentMachinePool.Object.Spec.Template.Spec.InfrastructureRef

@@ -991,6 +992,11 @@
version := computeMachinePoolVersion(s, machinePoolTopology, currentMachinePool)

// Compute values that can be set both in the MachinePoolClass and in the MachinePoolTopology
+ minReadySeconds := machinePoolClass.MinReadySeconds
+ if machinePoolTopology.MinReadySeconds != nil {
+ minReadySeconds = machinePoolTopology.MinReadySeconds
+ }
+
failureDomains := machinePoolClass.FailureDomains
if machinePoolTopology.FailureDomains != nil {
failureDomains = machinePoolTopology.FailureDomains

@@ -1031,8 +1037,9 @@
Namespace: s.Current.Cluster.Namespace,
},
Spec: expv1.MachinePoolSpec{
- ClusterName: s.Current.Cluster.Name,
- FailureDomains: failureDomains,
+ ClusterName: s.Current.Cluster.Name,
+ MinReadySeconds: minReadySeconds,
+ FailureDomains: failureDomains,
Template: clusterv1.MachineTemplateSpec{
Spec: clusterv1.MachineSpec{
ClusterName: s.Current.Cluster.Name,
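The new minReadySeconds block follows the same precedence rule the function already applies to failureDomains and the other class/topology pairs: a value set on the MachinePoolTopology wins over the MachinePoolClass default. A generic sketch of that rule in isolation (hypothetical helper, not in the commit):

// pickTopologyOverride returns the topology-level value when set,
// falling back to the ClusterClass default otherwise.
func pickTopologyOverride[T any](classValue, topologyValue *T) *T {
	if topologyValue != nil {
		return topologyValue
	}
	return classValue
}

Used this way, the hunk above reduces to minReadySeconds := pickTopologyOverride(machinePoolClass.MinReadySeconds, machinePoolTopology.MinReadySeconds).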
4 changes: 2 additions & 2 deletions internal/controllers/topology/cluster/patches/patch.go
@@ -57,7 +57,7 @@ func (i PreserveFields) ApplyToHelper(opts *PatchOptions) {
opts.preserveFields = i
}

- // patchObject overwrites spec in object with spec.template.spec of patchedTemplate,
+ // patchObject overwrites spec in object with spec.template.spec of modifiedObject,
// while preserving the configured fields.
// For example, ControlPlane.spec will be overwritten with the patched
// ControlPlaneTemplate.spec.template.spec but preserving spec.version and spec.replicas

@@ -66,7 +66,7 @@ func patchObject(ctx context.Context, object, modifiedObject *unstructured.Unstr
return patchUnstructured(ctx, object, modifiedObject, "spec.template.spec", "spec", opts...)
}

- // patchTemplate overwrites spec.template.spec in template with spec.template.spec of patchedTemplate,
+ // patchTemplate overwrites spec.template.spec in template with spec.template.spec of modifiedTemplate,
// while preserving the configured fields.
// For example, it's possible to patch BootstrapTemplate.spec.template.spec with a patched
// BootstrapTemplate.spec.template.spec while preserving fields configured via opts.fieldsToPreserve.
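Both helpers delegate to patchUnstructured, which copies one field path onto another while keeping selected fields of the target intact. A simplified sketch of that save-copy-restore shape, using only the k8s.io/apimachinery unstructured helpers (function name and signature are illustrative; the real patchUnstructured also computes diffs and logs):

import (
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// patchPreserving copies srcFields of modified onto destFields of obj
// (e.g. spec.template.spec onto spec), then restores the preserved
// destFields-relative paths from the original object.
func patchPreserving(obj, modified *unstructured.Unstructured, srcFields, destFields []string, preserve [][]string) error {
	type saved struct {
		path  []string
		value interface{}
		found bool
	}
	var savedFields []saved
	// Save preserved values (paths relative to destFields) from the original.
	for _, p := range preserve {
		full := append(append([]string{}, destFields...), p...)
		v, ok, err := unstructured.NestedFieldCopy(obj.Object, full...)
		if err != nil {
			return err
		}
		savedFields = append(savedFields, saved{path: full, value: v, found: ok})
	}

	// Overwrite the target with the patched source.
	src, ok, err := unstructured.NestedFieldCopy(modified.Object, srcFields...)
	if err != nil {
		return err
	}
	if !ok {
		return nil // nothing to copy
	}
	if err := unstructured.SetNestedField(obj.Object, src, destFields...); err != nil {
		return err
	}

	// Put the preserved fields back (or drop them if they didn't exist).
	for _, s := range savedFields {
		if !s.found {
			unstructured.RemoveNestedField(obj.Object, s.path...)
			continue
		}
		if err := unstructured.SetNestedField(obj.Object, s.value, s.path...); err != nil {
			return err
		}
	}
	return nil
}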
4 changes: 2 additions & 2 deletions internal/controllers/topology/cluster/patches/template.go
@@ -91,7 +91,7 @@ func (t *requestItemBuilder) Build() (*runtimehooksv1.GeneratePatchesRequestItem
}

// getTemplateAsUnstructured is a utility func that returns a template matching the holderKind, holderFieldPath
- // and mdTopologyName from a GeneratePatchesRequest.
+ // and topologyNames from a GeneratePatchesRequest.
func getTemplateAsUnstructured(req *runtimehooksv1.GeneratePatchesRequest, holderKind, holderFieldPath string, topologyNames requestTopologyName) (*unstructured.Unstructured, error) {
// Find the requestItem.
requestItem := getRequestItem(req, holderKind, holderFieldPath, topologyNames)

@@ -119,7 +119,7 @@ func getRequestItemByUID(req *runtimehooksv1.GeneratePatchesRequest, uid types.U
return nil
}

- // getRequestItem is a utility func that returns a template matching the holderKind, holderFieldPath and mdTopologyName from a GeneratePatchesRequest.
+ // getRequestItem is a utility func that returns a template matching the holderKind, holderFieldPath and topologyNames from a GeneratePatchesRequest.
func getRequestItem(req *runtimehooksv1.GeneratePatchesRequest, holderKind, holderFieldPath string, topologyNames requestTopologyName) *runtimehooksv1.GeneratePatchesRequestItem {
for _, template := range req.Items {
if holderKind != "" && template.HolderReference.Kind != holderKind {
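For orientation, a hypothetical call site for the renamed parameter, using the signature shown above — the holder kind, field path, and the requestTopologyName field are illustrative guesses, not taken from the commit:

// Look up the template whose holder is a given MachineDeployment topology.
// req is a *runtimehooksv1.GeneratePatchesRequest already in scope.
names := requestTopologyName{mdTopologyName: "md-topology-1"} // assumed field name
tpl, err := getTemplateAsUnstructured(req, "MachineDeployment", "spec.template.spec.bootstrap.configRef", names)
if err != nil {
	return err
}
_ = tpl // patched/inspected by the caller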
28 changes: 14 additions & 14 deletions internal/controllers/topology/cluster/reconcile_state.go
@@ -250,9 +250,9 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope
// Call the registered extensions for the hook after the cluster is fully upgraded.
// A cluster is considered fully upgraded if:
// - Control plane is stable (not upgrading, not scaling, not about to upgrade)
- // - MachineDeployments are not currently upgrading
- // - MachineDeployments are not pending an upgrade
- // - MachineDeployments are not pending create
+ // - MachineDeployments/MachinePools are not currently upgrading
+ // - MachineDeployments/MachinePools are not pending an upgrade
+ // - MachineDeployments/MachinePools are not pending create
if isControlPlaneStable(s) && // Control Plane stable checks
len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade
!s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create

@@ -737,6 +737,10 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope)

// Create MachinePools.
if len(diff.toCreate) > 0 {
+ // In current state we only got the MP list via a cached call.
+ // As a consequence, in order to prevent the creation of duplicate MP due to stale reads,
+ // we are now using a live client to double-check here that the MachinePool
+ // to be created doesn't exist yet.
currentMPTopologyNames, err := r.getCurrentMachinePools(ctx, s)
if err != nil {
return err

@@ -806,8 +810,6 @@
// createMachinePool creates a MachinePool and the corresponding templates.
func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *scope.MachinePoolState) error {
// Do not create the MachinePool if it is marked as pending create.
- // This will also block MHC creation because creating the MHC without the corresponding
- // MachinePool is unnecessary.
mpTopologyName, ok := mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
if !ok || mpTopologyName == "" {
// Note: This is only an additional safety check and should not happen. The label will always be added when computing

@@ -868,7 +870,7 @@ func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *
return nil
}

- // updateMachinePool updates a MachinePool. Also rotates the corresponding Templates if necessary.
+ // updateMachinePool updates a MachinePool. Also updates the corresponding objects if necessary.
func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, currentMP, desiredMP *scope.MachinePoolState) error {
log := tlog.LoggerFrom(ctx).WithMachinePool(desiredMP.Object)

@@ -882,20 +884,18 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, s *scope.Scope, curr
cluster := s.Current.Cluster
infraCtx, _ := log.WithObject(desiredMP.InfrastructureMachinePoolObject).Into(ctx)
if err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{
- cluster: cluster,
- current: currentMP.InfrastructureMachinePoolObject,
- desired: desiredMP.InfrastructureMachinePoolObject,
- versionGetter: contract.ControlPlane().Version().Get,
+ cluster: cluster,
+ current: currentMP.InfrastructureMachinePoolObject,
+ desired: desiredMP.InfrastructureMachinePoolObject,
}); err != nil {
return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
}

bootstrapCtx, _ := log.WithObject(desiredMP.BootstrapObject).Into(ctx)
if err := r.reconcileReferencedObject(bootstrapCtx, reconcileReferencedObjectInput{
- cluster: cluster,
- current: currentMP.BootstrapObject,
- desired: desiredMP.BootstrapObject,
- versionGetter: contract.ControlPlane().Version().Get,
+ cluster: cluster,
+ current: currentMP.BootstrapObject,
+ desired: desiredMP.BootstrapObject,
}); err != nil {
return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
}
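With MachinePools added to the list above, the gate in front of the AfterClusterUpgrade hook covers both kinds of workers. A condensed sketch of the predicate, built only from the helper and tracker methods this commit touches (the consolidated function itself is hypothetical):

// clusterFullyUpgraded summarizes the callAfterClusterUpgrade gate:
// stable control plane, and no MachineDeployment or MachinePool that is
// upgrading, pending an upgrade, or pending create.
func clusterFullyUpgraded(s *scope.Scope) bool {
	return isControlPlaneStable(s) &&
		len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 &&
		!s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() &&
		!s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() &&
		len(s.UpgradeTracker.MachinePools.UpgradingNames()) == 0 &&
		!s.UpgradeTracker.MachinePools.IsAnyPendingCreate() &&
		!s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade()
}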
2 changes: 1 addition & 1 deletion internal/controllers/topology/cluster/scope/blueprint.go
@@ -87,7 +87,7 @@ type MachinePoolBlueprint struct {
// BootstrapTemplate holds the bootstrap template for a MachinePool referenced from ClusterClass.
BootstrapTemplate *unstructured.Unstructured

- // InfrastructureMachinePoolTemplate holds the infrastructure machine template for a MachinePool referenced from ClusterClass.
+ // InfrastructureMachinePoolTemplate holds the infrastructure machine pool template for a MachinePool referenced from ClusterClass.
InfrastructureMachinePoolTemplate *unstructured.Unstructured
}
5 changes: 3 additions & 2 deletions internal/controllers/topology/cluster/scope/scope_test.go
@@ -26,7 +26,7 @@ import (
)

func TestNew(t *testing.T) {
- t.Run("should set the right maxMachineDeploymentUpgradeConcurrency in UpgradeTracker from the cluster annotation", func(t *testing.T) {
+ t.Run("should set the right maxUpgradeConcurrency in UpgradeTracker from the cluster annotation", func(t *testing.T) {
tests := []struct {
name string
cluster *clusterv1.Cluster

@@ -53,7 +53,8 @@
for _, tt := range tests {
g := NewWithT(t)
s := New(tt.cluster)
- g.Expect(s.UpgradeTracker.MachineDeployments.maxMachineDeploymentUpgradeConcurrency).To(Equal(tt.want))
+ g.Expect(s.UpgradeTracker.MachineDeployments.maxUpgradeConcurrency).To(Equal(tt.want))
+ g.Expect(s.UpgradeTracker.MachinePools.maxUpgradeConcurrency).To(Equal(tt.want))
}
})
}
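The test exercises how New seeds both trackers from a single cluster annotation. A sketch of the parsing it implies — the annotation constant and the default value are assumptions, not shown in the diff:

import (
	"strconv"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
)

// maxUpgradeConcurrencyFrom reads the topology upgrade-concurrency
// annotation; per the test above, the MachineDeployments and MachinePools
// trackers receive the same value. Constant name and default of 1 are
// assumptions for illustration.
func maxUpgradeConcurrencyFrom(cluster *clusterv1.Cluster) int {
	maxConcurrency := 1
	if v, ok := cluster.Annotations[clusterv1.ClusterTopologyUpgradeConcurrencyAnnotation]; ok {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			maxConcurrency = n
		}
	}
	return maxConcurrency
}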
2 changes: 1 addition & 1 deletion internal/controllers/topology/cluster/scope/state.go
@@ -161,7 +161,7 @@ type MachinePoolState struct {
}

// IsUpgrading determines if the MachinePool is upgrading.
- // A machine deployment is considered upgrading if at least one of the Machines of this
+ // A machine pool is considered upgrading if at least one of the Machines of this
// MachinePool has a different version.
func (mp *MachinePoolState) IsUpgrading(ctx context.Context, c client.Client) (bool, error) {
// If the MachinePool has no version there is no definitive way to check if it is upgrading. Therefore, return false.
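The corrected comment also describes the shape of the check: a MachinePool is upgrading if any of its Machines reports a version different from the MachinePool's. A sketch of that comparison (the label used to select the pool's Machines is an assumption):

import (
	"context"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1beta1"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// isMachinePoolUpgrading lists the MachinePool's Machines and reports true
// if any runs a different version than the pool's desired version.
func isMachinePoolUpgrading(ctx context.Context, c client.Client, mp *expv1.MachinePool) (bool, error) {
	if mp.Spec.Template.Spec.Version == nil {
		// No desired version: no definitive way to tell, so mirror the
		// real code and report not upgrading.
		return false, nil
	}
	machines := &clusterv1.MachineList{}
	if err := c.List(ctx, machines,
		client.InNamespace(mp.Namespace),
		client.MatchingLabels{clusterv1.MachinePoolNameLabel: mp.Name}, // assumed selector
	); err != nil {
		return false, err
	}
	for i := range machines.Items {
		v := machines.Items[i].Spec.Version
		if v == nil || *v != *mp.Spec.Template.Spec.Version {
			return true, nil
		}
	}
	return false, nil
}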