Skip to content

Commit

Permalink
Start adding more conditions
Browse files Browse the repository at this point in the history
  • Loading branch information
Jont828 committed Nov 15, 2023
1 parent cee1ae1 commit 44ece9a
Showing 1 changed file with 63 additions and 5 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -317,11 +317,7 @@ func (r *DockerMachinePoolReconciler) reconcileNormal(ctx context.Context, clust
return ctrl.Result{}, nil
}

dockerMachinePool.Status.Ready = false
conditions.MarkFalse(dockerMachinePool, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")

// if some machine is still provisioning, force reconcile in few seconds to check again infrastructure.
return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
return r.updateStatus(ctx, cluster, machinePool, dockerMachinePool, dockerMachineList.Items)
}

func getDockerMachines(ctx context.Context, c client.Client, cluster clusterv1.Cluster, machinePool expv1.MachinePool, dockerMachinePool infraexpv1.DockerMachinePool) (*infrav1.DockerMachineList, error) {
Expand Down Expand Up @@ -379,6 +375,68 @@ func dockerMachineToDockerMachinePool(_ context.Context, o client.Object) []ctrl
return nil
}

// updateStatus updates the Status field for the DockerMachinePool.
// It compares the number of ready replicas against the desired replica count,
// sets the Resized and MachinesCreated conditions accordingly, aggregates the
// per-machine conditions into ReplicasReady, and requeues while the pool is
// not yet ready.
func (r *DockerMachinePoolReconciler) updateStatus(ctx context.Context, cluster *clusterv1.Cluster, machinePool *expv1.MachinePool, dockerMachinePool *infraexpv1.DockerMachinePool, dockerMachines []infrav1.DockerMachine) (ctrl.Result, error) {
	log := ctrl.LoggerFrom(ctx)

	// List the Docker containers. This corresponds to an InfraMachinePool instance for providers.
	labelFilters := map[string]string{dockerMachinePoolLabel: dockerMachinePool.Name}
	externalMachines, err := docker.ListMachinesByCluster(ctx, cluster, labelFilters)
	if err != nil {
		return ctrl.Result{}, errors.Wrap(err, "failed to list all machines in the cluster")
	}

	externalMachineMap := make(map[string]*docker.Machine, len(externalMachines))
	for _, externalMachine := range externalMachines {
		externalMachineMap[externalMachine.Name()] = externalMachine
	}
	// Only the ready machines are needed here; the deletion candidates are handled by the caller.
	_, readyMachines, err := r.getDeletionCandidates(ctx, dockerMachines, externalMachineMap, machinePool, dockerMachinePool)
	if err != nil {
		return ctrl.Result{}, err
	}

	readyReplicaCount := len(readyMachines)

	// Guard against a nil Replicas pointer to avoid a panic; treat it as a
	// desired count of 0. NOTE(review): Spec.Replicas is normally defaulted by
	// a webhook — confirm whether this path can ever see nil.
	desiredReplicas := 0
	if machinePool.Spec.Replicas != nil {
		desiredReplicas = int(*machinePool.Spec.Replicas)
	}

	switch {
	// We are scaling up
	case readyReplicaCount < desiredReplicas:
		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingUpReason, clusterv1.ConditionSeverityWarning, "Scaling up DockerMachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
	// We are scaling down
	case readyReplicaCount > desiredReplicas:
		conditions.MarkFalse(dockerMachinePool, clusterv1.ResizedCondition, clusterv1.ScalingDownReason, clusterv1.ConditionSeverityWarning, "Scaling down DockerMachinePool to %d replicas (actual %d)", desiredReplicas, readyReplicaCount)
	default:
		// Make sure last resize operation is marked as completed.
		// NOTE: we are checking the number of machines ready so we report resize completed only when the machines
		// are actually provisioned (vs reporting completed immediately after the last machine object is created). This convention is also used by KCP.
		if len(dockerMachines) == readyReplicaCount {
			if conditions.IsFalse(dockerMachinePool, clusterv1.ResizedCondition) {
				log.Info("All the replicas are ready", "replicas", readyReplicaCount)
			}
			conditions.MarkTrue(dockerMachinePool, clusterv1.ResizedCondition)
		}
		// This means that there was no error in generating the desired number of machine objects
		conditions.MarkTrue(dockerMachinePool, clusterv1.MachinesCreatedCondition)
	}

	getters := make([]conditions.Getter, 0, len(dockerMachines))
	for i := range dockerMachines {
		getters = append(getters, &dockerMachines[i])
	}

	// Aggregate the operational state of all the machines; while aggregating we are adding the
	// source ref (reason@machine/name) so the problem can be easily tracked down to its source machine.
	conditions.SetAggregate(dockerMachinePool, expv1.ReplicasReadyCondition, getters, conditions.AddSourceRef(), conditions.WithStepCounterIf(false))
	dockerMachinePool.Status.Ready = conditions.IsTrue(dockerMachinePool, expv1.ReplicasReadyCondition)

	// Some machines are still provisioning; force a reconcile in a few seconds to check again.
	if !dockerMachinePool.Status.Ready {
		return ctrl.Result{RequeueAfter: 10 * time.Second}, nil
	}

	return ctrl.Result{}, nil
}

func patchDockerMachinePool(ctx context.Context, patchHelper *patch.Helper, dockerMachinePool *infraexpv1.DockerMachinePool) error {
conditions.SetSummary(dockerMachinePool,
conditions.WithConditions(
Expand Down

0 comments on commit 44ece9a

Please sign in to comment.