Skip to content

Commit

Permalink
Re-diff desired and reconcile state
Browse files Browse the repository at this point in the history
  • Loading branch information
willie-yao committed Aug 11, 2023
1 parent 123a5aa commit fbc594a
Show file tree
Hide file tree
Showing 2 changed files with 55 additions and 13 deletions.
19 changes: 13 additions & 6 deletions internal/controllers/topology/cluster/desired_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -75,6 +75,13 @@ func (r *Reconciler) computeDesiredState(ctx context.Context, s *scope.Scope) (*
}
s.UpgradeTracker.MachineDeployments.MarkUpgrading(mdUpgradingNames...)

// Mark all the MachinePools that are currently upgrading.
mpUpgradingNames, err := s.Current.MachinePools.Upgrading(ctx, r.Client)
if err != nil {
return nil, errors.Wrap(err, "failed to check if any MachinePool is upgrading")
}
s.UpgradeTracker.MachinePools.MarkUpgrading(mpUpgradingNames...)

// Compute the desired state of the ControlPlane object, eventually adding a reference to the
// InfrastructureMachineTemplate generated by the previous step.
if desiredState.ControlPlane.Object, err = r.computeControlPlane(ctx, s, desiredState.ControlPlane.InfrastructureMachineTemplate); err != nil {
Expand Down Expand Up @@ -920,17 +927,17 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c

// Compute the bootstrap template.
currentMachinePool := s.Current.MachinePools[machinePoolTopology.Name]
var currentBootstrapTemplateRef *corev1.ObjectReference
var currentBootstrapConfigRef *corev1.ObjectReference
if currentMachinePool != nil && currentMachinePool.BootstrapObject != nil {
currentBootstrapTemplateRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
currentBootstrapConfigRef = currentMachinePool.Object.Spec.Template.Spec.Bootstrap.ConfigRef
}
var err error
desiredMachinePool.BootstrapObject, err = templateToObject(templateToInput{
template: machinePoolBlueprint.BootstrapConfig,
templateClonedFromRef: contract.ObjToRef(machinePoolBlueprint.BootstrapConfig),
cluster: s.Current.Cluster,
namePrefix: bootstrapTemplateNamePrefix(s.Current.Cluster.Name, machinePoolTopology.Name),
currentObjectRef: currentBootstrapTemplateRef,
currentObjectRef: currentBootstrapConfigRef,
})
if err != nil {
return nil, errors.Wrapf(err, "failed to compute bootstrap object for topology %q", machinePoolTopology.Name)
Expand Down Expand Up @@ -991,9 +998,9 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
}

// Compute the MachinePool object.
desiredBootstrapTemplateRef, err := calculateRefDesiredAPIVersion(currentBootstrapTemplateRef, desiredMachinePool.BootstrapObject)
desiredBootstrapConfigRef, err := calculateRefDesiredAPIVersion(currentBootstrapConfigRef, desiredMachinePool.BootstrapObject)
if err != nil {
return nil, errors.Wrap(err, "failed to calculate desired bootstrap template ref")
return nil, errors.Wrap(err, "failed to calculate desired bootstrap config ref")
}
desiredInfraMachineTemplateRef, err := calculateRefDesiredAPIVersion(currentInfraMachineTemplateRef, desiredMachinePool.InfrastructureMachinePoolObject)
if err != nil {
Expand All @@ -1016,7 +1023,7 @@ func computeMachinePool(_ context.Context, s *scope.Scope, machinePoolTopology c
Spec: clusterv1.MachineSpec{
ClusterName: s.Current.Cluster.Name,
Version: pointer.String(version),
Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapTemplateRef},
Bootstrap: clusterv1.Bootstrap{ConfigRef: desiredBootstrapConfigRef},
InfrastructureRef: *desiredInfraMachineTemplateRef,
NodeDrainTimeout: nodeDrainTimeout,
NodeVolumeDetachTimeout: nodeVolumeDetachTimeout,
Expand Down
49 changes: 42 additions & 7 deletions internal/controllers/topology/cluster/reconcile_state.go
Original file line number Diff line number Diff line change
Expand Up @@ -257,7 +257,11 @@ func (r *Reconciler) callAfterClusterUpgrade(ctx context.Context, s *scope.Scope
len(s.UpgradeTracker.MachineDeployments.UpgradingNames()) == 0 && // Machine deployments are not upgrading or not about to upgrade
!s.UpgradeTracker.MachineDeployments.IsAnyPendingCreate() && // No MachineDeployments are pending create
!s.UpgradeTracker.MachineDeployments.IsAnyPendingUpgrade() && // No MachineDeployments are pending an upgrade
!s.UpgradeTracker.MachineDeployments.DeferredUpgrade() { // No MachineDeployments have deferred an upgrade
!s.UpgradeTracker.MachineDeployments.DeferredUpgrade() && // No MachineDeployments have deferred an upgrade
len(s.UpgradeTracker.MachinePools.UpgradingNames()) == 0 && // Machine pools are not upgrading or not about to upgrade
!s.UpgradeTracker.MachinePools.IsAnyPendingCreate() && // No MachinePools are pending create
!s.UpgradeTracker.MachinePools.IsAnyPendingUpgrade() && // No MachinePools are pending an upgrade
!s.UpgradeTracker.MachinePools.DeferredUpgrade() { // No MachinePools have deferred an upgrade
// Everything is stable and the cluster can be considered fully upgraded.
hookRequest := &runtimehooksv1.AfterClusterUpgradeRequest{
Cluster: *s.Current.Cluster,
Expand Down Expand Up @@ -747,7 +751,7 @@ func (r *Reconciler) reconcileMachinePools(ctx context.Context, s *scope.Scope)
continue
}

if err := r.createMachinePool(ctx, s.Current.Cluster, mp); err != nil {
if err := r.createMachinePool(ctx, s, mp); err != nil {
return err
}
}
Expand Down Expand Up @@ -800,9 +804,23 @@ func (r *Reconciler) getCurrentMachinePools(ctx context.Context, s *scope.Scope)
}

// createMachinePool creates a MachinePool and the corresponding templates.
func (r *Reconciler) createMachinePool(ctx context.Context, cluster *clusterv1.Cluster, mp *scope.MachinePoolState) error {
log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object)
func (r *Reconciler) createMachinePool(ctx context.Context, s *scope.Scope, mp *scope.MachinePoolState) error {
// Do not create the MachinePool if it is marked as pending create.
// NOTE(review): the following remark appears carried over from the MachineDeployment
// creation path — nothing in this function creates an MHC, and MachinePools do not
// appear to have an associated MachineHealthCheck; confirm and drop if so:
// "This will also block MHC creation because creating the MHC without the corresponding
// MachinePool is unnecessary."
mpTopologyName, ok := mp.Object.Labels[clusterv1.ClusterTopologyMachinePoolNameLabel]
if !ok || mpTopologyName == "" {
// Note: This is only an additional safety check and should not happen. The label will always be added when computing
// the desired MachinePool.
return errors.Errorf("new MachinePool is missing the %q label", clusterv1.ClusterTopologyMachinePoolNameLabel)
}
// Return early if the MachinePool is pending create.
if s.UpgradeTracker.MachinePools.IsPendingCreate(mpTopologyName) {
return nil
}

log := tlog.LoggerFrom(ctx).WithMachinePool(mp.Object)
cluster := s.Current.Cluster
infraCtx, _ := log.WithObject(mp.InfrastructureMachinePoolObject).Into(ctx)
if err := r.reconcileReferencedObject(infraCtx, reconcileReferencedObjectInput{
cluster: cluster,
Expand Down Expand Up @@ -830,6 +848,23 @@ func (r *Reconciler) createMachinePool(ctx context.Context, cluster *clusterv1.C
}
r.recorder.Eventf(cluster, corev1.EventTypeNormal, createEventReason, "Created %q", tlog.KObj{Obj: mp.Object})

// Wait until MachinePool is visible in the cache.
// Note: We have to do this because otherwise using a cached client in current state could
// miss a newly created MachinePool (because the cache might be stale).
err = wait.PollUntilContextTimeout(ctx, 5*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
key := client.ObjectKey{Namespace: mp.Object.Namespace, Name: mp.Object.Name}
if err := r.Client.Get(ctx, key, &expv1.MachinePool{}); err != nil {
if apierrors.IsNotFound(err) {
return false, nil
}
return false, err
}
return true, nil
})
if err != nil {
return errors.Wrapf(err, "failed waiting for MachinePool %s to be visible in the cache after create", mp.Object.Kind)
}

return nil
}

Expand Down Expand Up @@ -857,7 +892,7 @@ func (r *Reconciler) updateMachinePool(ctx context.Context, cluster *clusterv1.C
return errors.Wrapf(err, "failed to reconcile %s", tlog.KObj{Obj: currentMP.Object})
}

// Check differences between current and desired MachineDeployment, and eventually patch the current object.
// Check differences between current and desired MachinePool, and eventually patch the current object.
log = log.WithObject(desiredMP.Object)
patchHelper, err := r.patchHelperFactory(ctx, currentMP.Object, desiredMP.Object)
if err != nil {
Expand Down Expand Up @@ -904,8 +939,8 @@ type machineDiff struct {
toCreate, toUpdate, toDelete []string
}

// calculateMachineDeploymentDiff compares two maps of MachineDeploymentState and calculates which
// MachineDeployments should be created, updated or deleted.
// calculateMachineDeploymentDiff compares two maps of MachineDeploymentState and calculates which
// MachineDeployments should be created, updated or deleted.
func calculateMachineDeploymentDiff(current, desired map[string]*scope.MachineDeploymentState) machineDiff {
var diff machineDiff

Expand Down

0 comments on commit fbc594a

Please sign in to comment.