Adds conditions to machinepool
Signed-off-by: Sagar Muchhal <muchhals@vmware.com>
srm09 committed Sep 24, 2020
1 parent f06e210 commit 59e71b0
Showing 9 changed files with 150 additions and 14 deletions.
8 changes: 4 additions & 4 deletions api/v1alpha3/condition_consts.go
@@ -38,13 +38,13 @@ const (
)

const (
// InfrastructureReadyCondition reports a summary of current status of the infrastructure object defined for this cluster/machine.
// InfrastructureReadyCondition reports a summary of current status of the infrastructure object defined for this cluster/machine/machinepool.
// This condition is mirrored from the Ready condition in the infrastructure ref object, and
// the absence of this condition might signal problems in the reconcile external loops or the fact that
// the infrastructure provider does not not implements the Ready condition yet.
// the infrastructure provider does not not implement the Ready condition yet.
InfrastructureReadyCondition ConditionType = "InfrastructureReady"

// WaitingForInfrastructureFallbackReason (Severity=Info) documents a cluster/machine waiting for the cluster/machine infrastructure
// WaitingForInfrastructureFallbackReason (Severity=Info) documents a cluster/machine/machinepool waiting for the underlying infrastructure
// to be available.
// NOTE: This reason is used only as a fallback when the infrastructure object is not reporting its own ready condition.
WaitingForInfrastructureFallbackReason = "WaitingForInfrastructure"
@@ -73,7 +73,7 @@ const (
// BootstrapReadyCondition reports a summary of current status of the bootstrap object defined for this machine.
// This condition is mirrored from the Ready condition in the bootstrap ref object, and
// the absence of this condition might signal problems in the reconcile external loops or the fact that
// the bootstrap provider does not not implements the Ready condition yet.
// the bootstrap provider does not not implement the Ready condition yet.
BootstrapReadyCondition ConditionType = "BootstrapReady"

// WaitingForDataSecretFallbackReason (Severity=Info) documents a machine waiting for the bootstrap data secret
44 changes: 44 additions & 0 deletions config/crd/bases/exp.cluster.x-k8s.io_machinepools.yaml
@@ -389,6 +389,50 @@ spec:
            bootstrapReady:
              description: BootstrapReady is the state of the bootstrap provider.
              type: boolean
            conditions:
              description: Conditions define the current service state of the MachinePool.
              items:
                description: Condition defines an observation of a Cluster API resource
                  operational state.
                properties:
                  lastTransitionTime:
                    description: Last time the condition transitioned from one status
                      to another. This should be when the underlying condition changed.
                      If that is not known, then using the time when the API field
                      changed is acceptable.
                    format: date-time
                    type: string
                  message:
                    description: A human readable message indicating details about
                      the transition. This field may be empty.
                    type: string
                  reason:
                    description: The reason for the condition's last transition
                      in CamelCase. The specific API may choose whether or not this
                      field is considered a guaranteed API. This field may not be
                      empty.
                    type: string
                  severity:
                    description: Severity provides an explicit classification of
                      Reason code, so the users or machines can immediately understand
                      the current situation and act accordingly. The Severity field
                      MUST be set only when Status=False.
                    type: string
                  status:
                    description: Status of the condition, one of True, False, Unknown.
                    type: string
                  type:
                    description: Type of condition in CamelCase or in foo.example.com/CamelCase.
                      Many .condition.type values are consistent across resources
                      like Available, but because arbitrary conditions can be useful
                      (see .node.status.conditions), the ability to deconflict is
                      important.
                    type: string
                required:
                - status
                - type
                type: object
              type: array
            failureMessage:
              description: FailureMessage indicates that there is a problem reconciling
                the state, and will be set to a descriptive error message.
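
Editor's note: the schema above is the serialized form of the clusterv1.Condition type. The following standalone Go sketch (not part of this commit) constructs one such entry — only type and status are required, and every value shown here is illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

func main() {
	// One entry of MachinePool .status.conditions, matching the schema above.
	cond := clusterv1.Condition{
		Type:               clusterv1.ReadyCondition,        // required
		Status:             corev1.ConditionFalse,           // required
		Severity:           clusterv1.ConditionSeverityInfo, // only set when Status=False
		Reason:             "WaitingForInfrastructure",      // CamelCase, optional
		Message:            "infrastructure is being provisioned",
		LastTransitionTime: metav1.Now(),
	}
	fmt.Printf("%s=%s (%s)\n", cond.Type, cond.Status, cond.Reason)
}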
30 changes: 30 additions & 0 deletions exp/api/v1alpha3/condition_consts.go
@@ -0,0 +1,30 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha3

import clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"

// Conditions and condition Reasons for the MachinePool object

const (
// ReplicasReadyCondition reports an aggregate of current status of the replicas controlled by the MachinePool.
ReplicasReadyCondition clusterv1.ConditionType = "ReplicasReady"

// WaitingForReplicasReadyReason (Severity=Info) documents a machinepool waiting for the required replicas
// to be ready.
WaitingForReplicasReadyReason = "WaitingForReplicasReady"
)
16 changes: 14 additions & 2 deletions exp/api/v1alpha3/machinepool_types.go
@@ -120,6 +120,10 @@ type MachinePoolStatus struct {
// ObservedGeneration is the latest generation observed by the controller.
// +optional
ObservedGeneration int64 `json:"observedGeneration,omitempty"`

// Conditions define the current service state of the MachinePool.
// +optional
Conditions clusterv1.Conditions `json:"conditions,omitempty"`
}

// ANCHOR_END: MachinePoolStatus
@@ -153,11 +157,11 @@ const (
// have become Kubernetes Nodes in the Ready state.
MachinePoolPhaseRunning = MachinePoolPhase("Running")

// MachinePoolPhaseRunning is the MachinePool state when the
// MachinePoolPhaseScalingUp is the MachinePool state when the
// MachinePool infrastructure is scaling up.
MachinePoolPhaseScalingUp = MachinePoolPhase("ScalingUp")

// MachinePoolPhaseRunning is the MachinePool state when the
// MachinePoolPhaseScalingDown is the MachinePool state when the
// MachinePool infrastructure is scaling down.
MachinePoolPhaseScalingDown = MachinePoolPhase("ScalingDown")

@@ -217,6 +221,14 @@ type MachinePool struct {
Status MachinePoolStatus `json:"status,omitempty"`
}

func (m *MachinePool) GetConditions() clusterv1.Conditions {
return m.Status.Conditions
}

func (m *MachinePool) SetConditions(conditions clusterv1.Conditions) {
m.Status.Conditions = conditions
}

// +kubebuilder:object:root=true

// MachinePoolList contains a list of MachinePool
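
Editor's note: the two accessors added above are what make the conditions utilities applicable to MachinePool. A minimal sketch (not part of the diff; it assumes the Getter/Setter interface shapes in sigs.k8s.io/cluster-api/util/conditions at this release) of how that is typically asserted and used:

package main

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

// Compile-time checks: with GetConditions/SetConditions in place, a
// *MachinePool satisfies the conditions Getter and Setter interfaces.
var (
	_ conditions.Getter = &expv1.MachinePool{}
	_ conditions.Setter = &expv1.MachinePool{}
)

func main() {
	mp := &expv1.MachinePool{}
	// Any of the util/conditions helpers can now operate on the MachinePool.
	conditions.MarkTrue(mp, clusterv1.ReadyCondition)
}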
7 changes: 7 additions & 0 deletions exp/api/v1alpha3/zz_generated.deepcopy.go

Some generated files are not rendered by default.

18 changes: 18 additions & 0 deletions exp/controllers/machinepool_controller.go
@@ -36,6 +36,7 @@ import (
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/cluster-api/util/predicates"
ctrl "sigs.k8s.io/controller-runtime"
@@ -135,11 +136,28 @@ func (r *MachinePoolReconciler) Reconcile(req ctrl.Request) (_ ctrl.Result, rete
r.reconcilePhase(mp)
// TODO(jpang): add support for metrics.

// Always update the readyCondition with the summary of the machinepool conditions.
conditions.SetSummary(mp,
conditions.WithConditions(
clusterv1.BootstrapReadyCondition,
clusterv1.InfrastructureReadyCondition,
expv1.ReplicasReadyCondition,
),
)

// Always attempt to patch the object and status after each reconciliation.
// Patch ObservedGeneration only if the reconciliation completed successfully
patchOpts := []patch.Option{}
if reterr == nil {
patchOpts = append(patchOpts, patch.WithStatusObservedGeneration{})
patchOpts = append(patchOpts,
patch.WithOwnedConditions{Conditions: []clusterv1.ConditionType{
clusterv1.ReadyCondition,
clusterv1.BootstrapReadyCondition,
clusterv1.InfrastructureReadyCondition,
expv1.ReplicasReadyCondition,
}},
)
}
if err := patchHelper.Patch(ctx, mp, patchOpts...); err != nil {
reterr = kerrors.NewAggregate([]error{reterr, err})
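
Editor's note: to see what the new SetSummary call produces, here is a self-contained sketch (not part of the commit; it leans on util/conditions merge behavior, so treat the exact reason on the summary as an approximation). As long as any of the three summarized conditions is false, the MachinePool's Ready condition is false as well:

package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	mp := &expv1.MachinePool{}

	// Two conditions are already true, one is still waiting.
	conditions.MarkTrue(mp, clusterv1.BootstrapReadyCondition)
	conditions.MarkTrue(mp, clusterv1.InfrastructureReadyCondition)
	conditions.MarkFalse(mp, expv1.ReplicasReadyCondition,
		expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")

	// Same call the Reconcile loop makes before patching.
	conditions.SetSummary(mp,
		conditions.WithConditions(
			clusterv1.BootstrapReadyCondition,
			clusterv1.InfrastructureReadyCondition,
			expv1.ReplicasReadyCondition,
		),
	)

	// Ready stays false until every summarized condition is true.
	if ready := conditions.Get(mp, clusterv1.ReadyCondition); ready != nil {
		fmt.Println(ready.Status, ready.Reason) // e.g. False WaitingForReplicasReady
	}
}

The WithOwnedConditions option added to the patch call lists the condition types this controller considers its own, so the patch helper keeps the controller's values for them if the patch would otherwise conflict with a concurrent update.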
10 changes: 9 additions & 1 deletion exp/controllers/machinepool_controller_noderef.go
@@ -31,6 +31,7 @@ import (
"sigs.k8s.io/cluster-api/controllers/remote"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
"sigs.k8s.io/cluster-api/util"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/controller-runtime/pkg/client"
)

@@ -53,6 +54,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *

// Check that the Machine doesn't already have a NodeRefs.
if mp.Status.Replicas == mp.Status.ReadyReplicas && len(mp.Status.NodeRefs) == int(mp.Status.ReadyReplicas) {
conditions.MarkTrue(mp, expv1.ReplicasReadyCondition)
return ctrl.Result{}, nil
}

@@ -95,6 +97,12 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
mp.Status.UnavailableReplicas = mp.Status.Replicas - mp.Status.AvailableReplicas
mp.Status.NodeRefs = nodeRefsResult.references

if mp.Status.Replicas != mp.Status.ReadyReplicas {
conditions.MarkFalse(mp, expv1.ReplicasReadyCondition, expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")
} else {
conditions.MarkTrue(mp, expv1.ReplicasReadyCondition)
}

logger.Info("Set MachinePools's NodeRefs", "noderefs", mp.Status.NodeRefs)
r.recorder.Event(mp, apicorev1.EventTypeNormal, "SuccessfulSetNodeRefs", fmt.Sprintf("%+v", mp.Status.NodeRefs))

@@ -106,7 +114,7 @@ func (r *MachinePoolReconciler) reconcileNodeRefs(ctx context.Context, cluster *
}

// deleteRetiredNodes deletes nodes that don't have a corresponding ProviderID in Spec.ProviderIDList.
// A MachinePool infrastucture provider indicates an instance in the set has been deleted by
// A MachinePool infrastructure provider indicates an instance in the set has been deleted by
// removing its ProviderID from the slice.
func (r *MachinePoolReconciler) deleteRetiredNodes(ctx context.Context, c client.Client, nodeRefs []apicorev1.ObjectReference, providerIDList []string) error {
logger := r.Log.WithValues("providerIDList", len(providerIDList))
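
Editor's note: the net effect of the MarkTrue/MarkFalse pair above is that consumers no longer need to compare the replica counters themselves. A small sketch (not part of the commit; the replica numbers are made up) of the write and read sides:

package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	mp := &expv1.MachinePool{}
	mp.Status.Replicas = 3
	mp.Status.ReadyReplicas = 1

	// Same decision reconcileNodeRefs makes above.
	if mp.Status.Replicas != mp.Status.ReadyReplicas {
		conditions.MarkFalse(mp, expv1.ReplicasReadyCondition,
			expv1.WaitingForReplicasReadyReason, clusterv1.ConditionSeverityInfo, "")
	} else {
		conditions.MarkTrue(mp, expv1.ReplicasReadyCondition)
	}

	// Read side: no need to re-derive readiness from the counters.
	fmt.Println(conditions.IsTrue(mp, expv1.ReplicasReadyCondition)) // false
	if c := conditions.Get(mp, expv1.ReplicasReadyCondition); c != nil {
		fmt.Println(c.Reason) // WaitingForReplicasReady
	}
}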
19 changes: 18 additions & 1 deletion exp/controllers/machinepool_controller_phases.go
@@ -36,6 +36,7 @@ import (
capierrors "sigs.k8s.io/cluster-api/errors"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
"sigs.k8s.io/cluster-api/util/annotations"
"sigs.k8s.io/cluster-api/util/conditions"
"sigs.k8s.io/cluster-api/util/patch"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"
@@ -185,6 +186,7 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster
// If the bootstrap data secret is populated, set ready and return.
if m.Spec.Template.Spec.Bootstrap.Data != nil || m.Spec.Template.Spec.Bootstrap.DataSecretName != nil {
m.Status.BootstrapReady = true
conditions.MarkTrue(m, clusterv1.BootstrapReadyCondition)
return ctrl.Result{}, nil
}

@@ -197,7 +199,15 @@ func (r *MachinePoolReconciler) reconcileBootstrap(ctx context.Context, cluster
ready, err := external.IsReady(bootstrapConfig)
if err != nil {
return ctrl.Result{}, err
} else if !ready {
}

// Report a summary of current status of the bootstrap object defined for this machine pool.
conditions.SetMirror(m, clusterv1.BootstrapReadyCondition,
conditions.UnstructuredGetter(bootstrapConfig),
conditions.WithFallbackValue(ready, clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""),
)

if !ready {
logger.V(2).Info("Bootstrap provider is not ready, requeuing")
return ctrl.Result{RequeueAfter: externalReadyWait}, nil
}
@@ -247,6 +257,13 @@ func (r *MachinePoolReconciler) reconcileInfrastructure(ctx context.Context, clu
}

mp.Status.InfrastructureReady = ready

// Report a summary of current status of the infrastructure object defined for this machine pool.
conditions.SetMirror(mp, clusterv1.InfrastructureReadyCondition,
conditions.UnstructuredGetter(infraConfig),
conditions.WithFallbackValue(ready, clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
)

if !mp.Status.InfrastructureReady {
logger.Info("Infrastructure provider is not ready, requeuing")
return ctrl.Result{RequeueAfter: externalReadyWait}, nil
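
Editor's note: both reconcileBootstrap and reconcileInfrastructure now use the same mirror-with-fallback pattern: if the external object publishes its own Ready condition, that condition is copied onto the MachinePool; if it only reports a ready boolean, the fallback value and reason are used instead. A standalone sketch (not part of the commit; the unstructured objects and their field values are hypothetical) of the two cases:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
	expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	mp := &expv1.MachinePool{}

	// Case 1: the infrastructure object exposes its own Ready condition;
	// SetMirror copies it onto the MachinePool and the fallback is unused.
	withConditions := &unstructured.Unstructured{Object: map[string]interface{}{
		"status": map[string]interface{}{
			"conditions": []interface{}{
				map[string]interface{}{"type": "Ready", "status": "False",
					"reason": "VMProvisioning", "severity": "Info"},
			},
		},
	}}
	conditions.SetMirror(mp, clusterv1.InfrastructureReadyCondition,
		conditions.UnstructuredGetter(withConditions),
		conditions.WithFallbackValue(false,
			clusterv1.WaitingForInfrastructureFallbackReason, clusterv1.ConditionSeverityInfo, ""),
	)

	// Case 2: the object reports only status.ready and no conditions;
	// the boolean fallback decides the mirrored value instead.
	noConditions := &unstructured.Unstructured{Object: map[string]interface{}{
		"status": map[string]interface{}{"ready": true},
	}}
	conditions.SetMirror(mp, clusterv1.BootstrapReadyCondition,
		conditions.UnstructuredGetter(noConditions),
		conditions.WithFallbackValue(true,
			clusterv1.WaitingForDataSecretFallbackReason, clusterv1.ConditionSeverityInfo, ""),
	)

	for _, c := range mp.GetConditions() {
		fmt.Println(c.Type, c.Status, c.Reason)
	}
}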
12 changes: 6 additions & 6 deletions exp/controllers/machinepool_controller_test.go
@@ -29,10 +29,10 @@ import (
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
expv1 "sigs.k8s.io/cluster-api/exp/api/v1alpha3"
"sigs.k8s.io/cluster-api/test/helpers"
"sigs.k8s.io/cluster-api/util"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
)
@@ -114,7 +114,7 @@ func TestMachinePoolFinalizer(t *testing.T) {
g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())

mr := &MachinePoolReconciler{
Client: fake.NewFakeClientWithScheme(
Client: helpers.NewFakeClientWithScheme(
scheme.Scheme,
clusterCorrectMeta,
machinePoolValidCluster,
@@ -226,7 +226,7 @@ func TestMachinePoolOwnerReference(t *testing.T) {
g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())

mr := &MachinePoolReconciler{
Client: fake.NewFakeClientWithScheme(
Client: helpers.NewFakeClientWithScheme(
scheme.Scheme,
testCluster,
machinePoolInvalidCluster,
@@ -423,7 +423,7 @@ func TestReconcileMachinePoolRequest(t *testing.T) {

g.Expect(clusterv1.AddToScheme(scheme.Scheme)).To(Succeed())

clientFake := fake.NewFakeClientWithScheme(
clientFake := helpers.NewFakeClientWithScheme(
scheme.Scheme,
&testCluster,
&tc.machinePool,
@@ -557,7 +557,7 @@ func TestReconcileMachinePoolDeleteExternal(t *testing.T) {
}

r := &MachinePoolReconciler{
Client: fake.NewFakeClientWithScheme(scheme.Scheme, objs...),
Client: helpers.NewFakeClientWithScheme(scheme.Scheme, objs...),
Log: log.Log,
scheme: scheme.Scheme,
}
@@ -608,7 +608,7 @@ func TestRemoveMachinePoolFinalizerAfterDeleteReconcile(t *testing.T) {
}
key := client.ObjectKey{Namespace: m.Namespace, Name: m.Name}
mr := &MachinePoolReconciler{
Client: fake.NewFakeClientWithScheme(scheme.Scheme, testCluster, m),
Client: helpers.NewFakeClientWithScheme(scheme.Scheme, testCluster, m),
Log: log.Log,
scheme: scheme.Scheme,
}
