diff --git a/internal/controllers/machine/machine_controller.go b/internal/controllers/machine/machine_controller.go
index e35ac8d39fe0..566119046962 100644
--- a/internal/controllers/machine/machine_controller.go
+++ b/internal/controllers/machine/machine_controller.go
@@ -242,6 +242,7 @@ func patchMachine(ctx context.Context, patchHelper *patch.Helper, machine *clust
 			// MHC reported condition should take precedence over the remediation progress
 			clusterv1.MachineHealthCheckSucceededCondition,
 			clusterv1.MachineOwnerRemediatedCondition,
+			clusterv1.DrainingSucceededCondition,
 		),
 		conditions.WithStepCounterIf(machine.ObjectMeta.DeletionTimestamp.IsZero() && machine.Spec.ProviderID == nil),
 		conditions.WithStepCounterIfOnly(
diff --git a/internal/controllers/machine/machine_controller_test.go b/internal/controllers/machine/machine_controller_test.go
index c303500eb404..7f73968fc96f 100644
--- a/internal/controllers/machine/machine_controller_test.go
+++ b/internal/controllers/machine/machine_controller_test.go
@@ -974,6 +974,17 @@ func TestMachineConditions(t *testing.T) {
 				conditions.UnknownCondition(clusterv1.MachineNodeHealthyCondition, clusterv1.NodeInspectionFailedReason, "Failed to get the Node for this Machine by ProviderID"),
 			},
 		},
+		{
+			name:           "ready condition summary consumes reason from the draining succeeded condition",
+			infraReady:     true,
+			bootstrapReady: true,
+			beforeFunc: func(bootstrap, infra *unstructured.Unstructured, m *clusterv1.Machine) {
+				conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, "")
+			},
+			conditionsToAssert: []*clusterv1.Condition{
+				conditions.FalseCondition(clusterv1.ReadyCondition, clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, ""),
+			},
+		},
 	}
 
 	for _, tt := range testcases {
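Not part of the patch: a minimal standalone sketch of the behavior the change enables, using Cluster API's v1beta1 types and util/conditions helpers. With DrainingSucceededCondition included in the summary, a failed drain now surfaces on the Machine's Ready condition; the main wrapper below is illustrative only.

package main

import (
	"fmt"

	clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
	"sigs.k8s.io/cluster-api/util/conditions"
)

func main() {
	m := &clusterv1.Machine{}

	// Simulate a drain failure on the Machine, as the test case above does.
	conditions.MarkFalse(m, clusterv1.DrainingSucceededCondition,
		clusterv1.DrainingFailedReason, clusterv1.ConditionSeverityWarning, "")

	// Summarize the listed conditions into Ready, mirroring the condition
	// list patchMachine uses after this change.
	conditions.SetSummary(m, conditions.WithConditions(
		clusterv1.InfrastructureReadyCondition,
		clusterv1.BootstrapReadyCondition,
		clusterv1.MachineHealthCheckSucceededCondition,
		clusterv1.MachineOwnerRemediatedCondition,
		clusterv1.DrainingSucceededCondition,
	))

	ready := conditions.Get(m, clusterv1.ReadyCondition)
	fmt.Println(ready.Status, ready.Reason) // expected: False DrainingFailed
}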