
Commit 74ff719

willie-yao committed Aug 11, 2023
1 parent 123a5aa
Showing 4 changed files with 83 additions and 15 deletions.
19 changes: 13 additions & 6 deletions internal/controllers/topology/cluster/desired_state.go
@@ -75,6 +75,13 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*
 	}
 	s.UpgradeTracker.MachineDeployments.MarkUpgrading(mdUpgradingNames...)
 
+	// Mark all the MachinePools that are currently upgrading.
+	mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, r.Client)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
+	}
+	s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)
+
 	// Compute the desired state of the ControlPlane object, eventually adding a reference to the
 	// InfrastructureMachineTemplate generated by the previous step.
 	if desiredState.ControlPlane.Object, err = r.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
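For orientation, the `Upgrading` helper invoked above reports which MachinePools are mid-rollout. A hypothetical, stripped-down version of such a check (the real helper lives in the scope package, queries the API server via `r.Client`, and may use different criteria):

```go
package main

import "fmt"

// mpUpgrading reports whether a MachinePool is mid-upgrade: its desired
// version differs from at least one version its replicas currently run.
// Hypothetical sketch only; the real Upgrading helper on the scope's
// MachinePools map may use other signals.
func mpUpgrading(desiredVersion string, observedVersions []string) bool {
	for _, v := range observedVersions {
		if v != desiredVersion {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(mpUpgrading("v1.28.0", []string{"v1.27.3", "v1.28.0"})) // true: one replica lags
}
```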
@@ -920,17 +927,17 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
 
 	// Compute the bootstrap template.
 	currentMachinePool := s.Current.MachinePools[machinePoolTopology.Name]
-	var currentBootstrapTemplateRef *corev1.ObjectReference
+	var currentBootstrapConfigRef *corev1.ObjectReference
 	if currentMachinePool != nil && currentMachinePool.BootstrapObject != nil {
-		currentBootstrapTemplateRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
+		currentBootstrapConfigRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
 	}
 	var err error
 	desiredMachinePool.BootstrapObject, err = templateToObject(templateToInput{
 		template:              machinePoolBlueprint.BootstrapConfig,
 		templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapConfig),
 		cluster:               s.Current.Cluster,
 		namePrefix:            bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
-		currentObjectRef:      currentBootstrapTemplateRef,
+		currentObjectRef:      currentBootstrapConfigRef,
 	})
 	if err != nil {
 		return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name)
@@ -991,9 +998,9 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
 	}
 
 	// Compute the MachinePool object.
-	desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachinePool.BootstrapObject)
+	desiredBootstrapConfigRef, err := calculateRefDesiredAPIVersion(currentBootstrapConfigRef, desiredMachinePool.BootstrapObject)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to calculate desired bootstrap template ref")
+		return nil, errors.Wrap(err, "failed to calculate desired bootstrap config ref")
 	}
 	desiredInfraMachineTemplateRef, err := calculateRefDesiredAPIVersion(currentInfraMachineTemplateRef, desiredMachinePool.InfrastructureMachinePoolObject)
 	if err != nil {
@@ -1016,7 +1023,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
 		Spec: clusterv1.MachineSpec{
 			ClusterName:             s.Current.Cluster.Name,
 			Version:                 pointer.String(version),
-			Bootstrap:               clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef},
+			Bootstrap:               clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef},
 			InfrastructureRef:       *desiredInfraMachineTemplateRef,
 			NodeDrainTimeout:        nodeDrainTimeout,
 			NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
49 changes: 42 additions & 7 deletions internal/controllers/topology/cluster/reconcile_state.go
@@ -257,7 +261,11 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope
 		len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade
 		!s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create
 		!s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() && // No MachineDeployments are pending an upgrade
-		!s.UpgradeTracker.MachineDeployments.DeferredUpgrade() { // No MachineDeployments have deferred an upgrade
+		!s.UpgradeTracker.MachineDeployments.DeferredUpgrade() && // No MachineDeployments have deferred an upgrade
+		len(s.UpgradeTracker.MachinePools.UpgradingNames()) == 0 && // Machine pools are not upgrading or not about to upgrade
+		!s.UpgradeTracker.MachinePools.IsAnyPendingCreate() && // No MachinePools are pending create
+		!s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() && // No MachinePools are pending an upgrade
+		!s.UpgradeTracker.MachinePools.DeferredUpgrade() { // No MachinePools have deferred an upgrade
 		// Everything is stable and the cluster can be considered fully upgraded.
 		hookRequest := &runtimehooksv1.AfterClusterUpgradeRequest{
 			Cluster: *s.Current.Cluster,
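The expanded condition above is the whole "is the cluster fully upgraded" gate: the AfterClusterUpgrade hook fires only once the control plane, MachineDeployments, and now MachinePools are all settled. A self-contained sketch of that predicate, with a made-up tracker type standing in for the project's UpgradeTracker:

```go
package main

import "fmt"

// workerTracker is a made-up stand-in for one side of the topology
// controller's UpgradeTracker (MachineDeployments or MachinePools).
type workerTracker struct {
	upgradingNames []string // groups currently upgrading
	pendingCreate  bool     // any group still waiting to be created
	pendingUpgrade bool     // any group still waiting to start its upgrade
	deferred       bool     // any group that deferred its upgrade
}

// settled mirrors the four per-group conditions in the gate above.
func (t workerTracker) settled() bool {
	return len(t.upgradingNames) == 0 && !t.pendingCreate && !t.pendingUpgrade && !t.deferred
}

func main() {
	mds := workerTracker{}
	mps := workerTracker{upgradingNames: []string{"mp-0"}}
	// The AfterClusterUpgrade hook would not fire yet: mp-0 is still upgrading.
	fmt.Println(mds.settled() && mps.settled()) // false
}
```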
@@ -747,7 +751,7 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope)
 			continue
 		}
 
-		if err := r.createMachinePool(ctx, s.Current.Cluster, mp); err != nil {
+		if err := r.createMachinePool(ctx, s, mp); err != nil {
 			return err
 		}
 	}
@@ -800,9 +804,23 @@ func (r *Reconciler) getCurrentMachinePools(ctx context.Context, s *scope.Scope)
 }
 
 // createMachinePool creates a MachinePool and the corresponding templates.
-func (r *Reconciler) createMachinePool(ctx context.Context, cluster *clusterv1.Cluster, mp *scope.MachinePoolState) error {
-	log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object)
+func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *scope.MachinePoolState) error {
+	// Do not create the MachinePool if it is marked as pending create.
+	// This will also block MHC creation because creating the MHC without the corresponding
+	// MachinePool is unnecessary.
+	mpTopologyName, ok := mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
+	if !ok || mpTopologyName == "" {
+		// Note: This is only an additional safety check and should not happen. The label will always be added when computing
+		// the desired MachinePool.
+		return errors.Errorf("new MachinePool is missing the %q label", clusterv1.ClusterTopologyMachinePoolNameLabel)
+	}
+	// Return early if the MachinePool is pending create.
+	if s.UpgradeTracker.MachinePools.IsPendingCreate(mpTopologyName) {
+		return nil
+	}
+
+	log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object)
+	cluster := s.Current.Cluster
 	infraCtx, _ := log.WithObject(mp.InfrastructureMachinePoolObject).Into(ctx)
 	if err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{
 		cluster: cluster,
@@ -830,6 +848,23 @@ func (r *Reconciler) createMachinePool(ctx context.Context, cluster *clusterv1.C
 	}
 	r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: mp.Object})
 
+	// Wait until MachinePool is visible in the cache.
+	// Note: We have to do this because otherwise using a cached client in current state could
+	// miss a newly created MachinePool (because the cache might be stale).
+	err = wait.PollUntilContextTimeout(ctx, 5*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
+		key := client.ObjectKey{Namespace: mp.Object.Namespace, Name: mp.Object.Name}
+		if err := r.Client.Get(ctx, key, &expv1.MachinePool{}); err != nil {
+			if apierrors.IsNotFound(err) {
+				return false, nil
+			}
+			return false, err
+		}
+		return true, nil
+	})
+	if err != nil {
+		return errors.Wrapf(err, "failed waiting for MachinePool %s to be visible in the cache after create", tlog.KObj{Obj: mp.Object})
+	}
+
 	return nil
 }

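This create-then-poll step works around cached-client staleness: the object was just created with a live write, but subsequent reconciles read through an informer cache that may not have caught up yet. A generic helper in the same spirit, under our own names (only `wait.PollUntilContextTimeout` and the controller-runtime client types are real APIs):

```go
package topology // hypothetical package name

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// waitForObjectInCache polls a cached client until a freshly created object
// becomes visible, so a follow-up reconcile reading through the stale cache
// does not miss it. into must be a pointer of the object's concrete type,
// e.g. &expv1.MachinePool{}.
func waitForObjectInCache(ctx context.Context, c client.Client, key client.ObjectKey, into client.Object) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		if err := c.Get(ctx, key, into); err != nil {
			if apierrors.IsNotFound(err) {
				return false, nil // not in the cache yet; keep polling
			}
			return false, err // unexpected error; stop waiting
		}
		return true, nil
	})
}
```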
@@ -857,7 +892,7 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, cluster *clusterv1.C
 		return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
 	}
 
-	// Check differences between current and desired MachineDeployment, and eventually patch the current object.
+	// Check differences between current and desired MachinePool, and eventually patch the current object.
 	log = log.WithObject(desiredMP.Object)
 	patchHelper, err := r.patchHelperFactory(ctx, currentMP.Object, desiredMP.Object)
 	if err != nil {
@@ -904,8 +939,8 @@ type machineDiff struct {
 	toCreate, toUpdate, toDelete []string
 }
 
-// calculateMachineDeploymentDiff compares two maps of MachineDeploymentState and calculates which
-// MachineDeployments should be created, updated or deleted.
+// calculateMachinePoolDiff compares two maps of MachinePoolState and calculates which
+// MachinePools should be created, updated or deleted.
 func calculateMachinePoolDiff(current, desired map[string]*scope.MachinePoolState) machineDiff {
 	var diff machineDiff
 
7 changes: 6 additions & 1 deletion internal/webhooks/clusterclass.go
@@ -293,7 +293,7 @@ func (webhook *ClusterClass) validateRemovedMachineDeploymentClassesAreNotUsed(c
 func (webhook *ClusterClass) validateRemovedMachinePoolClassesAreNotUsed(clusters []clusterv1.Cluster, oldClusterClass, newClusterClass *clusterv1.ClusterClass) field.ErrorList {
 	var allErrs field.ErrorList
 
-	removedClasses := webhook.removedMachineClasses(oldClusterClass, newClusterClass)
+	removedClasses := webhook.removedMachinePoolClasses(oldClusterClass, newClusterClass)
 	// If no classes have been removed return early as no further checks are needed.
 	if len(removedClasses) == 0 {
 		return nil
@@ -322,6 +322,11 @@ func (webhook *ClusterClass) removedMachineClasses(oldClusterClass, newClusterCl
 			removedClasses.Insert(oldClass.Class)
 		}
 	}
+	return removedClasses
+}
+
+func (webhook *ClusterClass) removedMachinePoolClasses(oldClusterClass, newClusterClass *clusterv1.ClusterClass) sets.Set[string] {
+	removedClasses := sets.Set[string]{}
 
 	mpClasses := webhook.classNamesFromMPWorkerClass(newClusterClass.Spec.Workers)
 	for _, oldClass := range oldClusterClass.Spec.Workers.MachinePools {
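The split-out helper computes removed MachinePool classes as a set difference between the old and new ClusterClass. The same pattern in miniature, using `k8s.io/apimachinery/pkg/util/sets` with made-up class names:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

// removedClasses returns the names present in old but absent from new,
// the same set difference the webhook computes for MachinePool classes.
func removedClasses(oldClasses, newClasses []string) sets.Set[string] {
	kept := sets.New[string](newClasses...)
	removed := sets.Set[string]{}
	for _, c := range oldClasses {
		if !kept.Has(c) {
			removed.Insert(c)
		}
	}
	return removed
}

func main() {
	fmt.Println(removedClasses([]string{"default-worker", "gpu-worker"}, []string{"default-worker"}))
	// Output: map[gpu-worker:{}]
}
```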
23 changes: 22 additions & 1 deletion test/extension/handlers/topologymutation/handler.go
@@ -359,13 +359,34 @@ func patchDockerMachineTemplate(ctx context.Context, dockerMachineTemplate *infr
 	return errors.New("no version variables found for DockerMachineTemplate patch")
 }
 
-// patchDockerMachineTemplate patches the DockerMachinePoolTemplate.
+// patchDockerMachinePoolTemplate patches the DockerMachinePoolTemplate.
 // It sets the CustomImage to an image for the version in use by the MachinePool.
 // NOTE: this patch is not required anymore after the introduction of the kind mapper in kind, however we keep it
 // as an example of version-aware patches.
 func patchDockerMachinePoolTemplate(ctx context.Context, dockerMachinePoolTemplate *infraexpv1.DockerMachinePoolTemplate, templateVariables map[string]apiextensionsv1.JSON) error {
 	log := ctrl.LoggerFrom(ctx)
 
+	// If the DockerMachinePoolTemplate belongs to the ControlPlane, set the images using the ControlPlane version.
+	// NOTE: The ControlPlane version might be different from Cluster.version or the MachinePools' versions;
+	// the builtin variables provide the right version to use.
+	// NOTE: This works by checking the existence of a builtin variable that exists only for templates linked to the ControlPlane.
+	cpVersion, found, err := topologymutation.GetStringVariable(templateVariables, "builtin.controlPlane.version")
+	if err != nil {
+		return errors.Wrap(err, "could not set customImage to control plane dockerMachinePoolTemplate")
+	}
+	if found {
+		semVer, err := version.ParseMajorMinorPatchTolerant(cpVersion)
+		if err != nil {
+			return errors.Wrap(err, "could not parse control plane version")
+		}
+		kindMapping := kind.GetMapping(semVer, "")
+
+		log.Info(fmt.Sprintf("Setting MachinePool custom image to %q", kindMapping.Image))
+		dockerMachinePoolTemplate.Spec.Template.Spec.CustomImage = kindMapping.Image
+		// Return early if we have successfully patched a control plane dockerMachinePoolTemplate.
+		return nil
+	}
+
 	// If the DockerMachinePoolTemplate belongs to a MachinePool, set the images using the MachinePool version.
 	// NOTE: The MachinePool version might be different from Cluster.version or other MachinePools' versions;
 	// the builtin variables provide the right version to use.
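The branch added above makes the patch version-aware by sniffing which builtin variables are present: control-plane-linked templates see `builtin.controlPlane.version`, while MachinePool templates presumably read the pool's own version variable. A distilled sketch of that selection, with plain strings in place of the `apiextensionsv1.JSON` values (any variable name beyond `builtin.controlPlane.version` is an assumption here):

```go
package main

import (
	"errors"
	"fmt"
)

// resolveVersion mirrors the branching above: templates linked to the
// ControlPlane carry builtin.controlPlane.version; MachinePool templates
// carry their pool's own version. Plain strings stand in for the real
// apiextensionsv1.JSON variable values.
func resolveVersion(vars map[string]string) (string, error) {
	if v, ok := vars["builtin.controlPlane.version"]; ok {
		return v, nil // template belongs to the ControlPlane
	}
	if v, ok := vars["builtin.machinePool.version"]; ok { // assumed variable name
		return v, nil // template belongs to a MachinePool
	}
	return "", errors.New("no version variables found")
}

func main() {
	v, _ := resolveVersion(map[string]string{"builtin.machinePool.version": "v1.28.0"})
	fmt.Println(v) // v1.28.0
}
```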
