Wait for all descendants when deleting a cluster
Instead of waiting only for all direct descendants to be deleted before
allowing cluster deletion to proceed (by removing the finalizer), wait
for all descendants, both direct and indirect, to be removed first.
Otherwise there is a race condition: indirect descendants (for example,
Machines belonging to a MachineSet) can still exist and need the cluster
to remain so they can be deleted properly.

Signed-off-by: Andy Goldstein <goldsteina@vmware.com>
(cherry picked from commit a193a17)
ncdc committed Dec 11, 2019
1 parent 81cb545 commit 5762854
Showing 2 changed files with 222 additions and 24 deletions.
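
To make the behaviour change easier to follow before reading the diff, here is a minimal, self-contained model of the new deletion gate. Every name and value in it (descendants, direct, indirect, the 5-second requeue) is an illustrative stand-in, not the controller's actual API:

package main

import (
	"fmt"
	"time"
)

// descendants models what listDescendants gathers: every MachineDeployment,
// MachineSet, and Machine labelled with the cluster's name.
type descendants struct {
	direct   []string // owned by the Cluster itself; the controller deletes these
	indirect []string // e.g. Machines owned by a MachineSet rather than by the Cluster
}

// reconcileDelete returns how long to requeue for, or zero when the
// finalizer may be removed and cluster deletion can proceed.
func reconcileDelete(d descendants) time.Duration {
	const deleteRequeueAfter = 5 * time.Second

	for _, name := range d.direct {
		fmt.Println("deleting direct descendant:", name)
	}

	// The old behaviour gated only on the direct descendants; this commit
	// gates on every descendant, so indirect ones can still reach the
	// Cluster while their own deletion finishes.
	if len(d.direct)+len(d.indirect) > 0 {
		return deleteRequeueAfter
	}
	return 0 // safe to remove the finalizer
}

func main() {
	d := descendants{direct: []string{"ms-1"}, indirect: []string{"ms-1-machine-a"}}
	fmt.Println("requeue after:", reconcileDelete(d))
}

With only ms-1 deleted directly, the indirect machine keeps the gate closed, which is exactly the race the commit message describes.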
84 changes: 60 additions & 24 deletions controllers/cluster_controller.go
@@ -161,9 +161,17 @@ func (r *ClusterReconciler) reconcile(ctx context.Context, cluster *clusterv1.Cl

// reconcileDelete handles cluster deletion.
func (r *ClusterReconciler) reconcileDelete(ctx context.Context, cluster *clusterv1.Cluster) (reconcile.Result, error) {
children, err := r.listChildren(ctx, cluster)
logger := r.Log.WithValues("cluster", cluster.Name, "namespace", cluster.Namespace)

descendants, err := r.listDescendants(ctx, cluster)
if err != nil {
klog.Errorf("Failed to list children of cluster %s/%s: %v", cluster.Namespace, cluster.Name, err)
logger.Error(err, "Failed to list descendants")
return reconcile.Result{}, err
}

children, err := descendants.filterOwnedDescendants(cluster)
if err != nil {
logger.Error(err, "Failed to extract direct descendants")
return reconcile.Result{}, err
}

@@ -197,8 +205,12 @@ func (r *ClusterReconciler) reconcileDelete(ctx context.Context, cluster *cluste
if len(errs) > 0 {
return ctrl.Result{}, kerrors.NewAggregate(errs)
}
}

// Requeue so we can check the next time to see if there are still any children left.
if descendantCount := descendants.length(); descendantCount > 0 {
indirect := descendantCount - len(children)
logger.Info("Cluster still has descendants - need to requeue", "direct", len(children), "indirect", indirect)
// Requeue so we can check the next time to see if there are still any descendants left.
return ctrl.Result{RequeueAfter: deleteRequeueAfter}, nil
}

@@ -229,57 +241,81 @@ func (r *ClusterReconciler) reconcileDelete(ctx context.Context, cluster *cluste
return ctrl.Result{}, nil
}

// listChildren returns a list of MachineDeployments, MachineSets, and Machines than have an owner reference to cluster
func (r *ClusterReconciler) listChildren(ctx context.Context, cluster *clusterv1.Cluster) ([]runtime.Object, error) {
type clusterDescendants struct {
machineDeployments clusterv1.MachineDeploymentList
machineSets clusterv1.MachineSetList
controlPlaneMachines clusterv1.MachineList
workerMachines clusterv1.MachineList
}

// length returns the number of descendants
func (c *clusterDescendants) length() int {
return len(c.machineDeployments.Items) +
len(c.machineSets.Items) +
len(c.controlPlaneMachines.Items) +
len(c.workerMachines.Items)
}

// listDescendants returns a list of all MachineDeployments, MachineSets, and Machines for the cluster.
func (r *ClusterReconciler) listDescendants(ctx context.Context, cluster *clusterv1.Cluster) (clusterDescendants, error) {
var descendants clusterDescendants

listOptions := []client.ListOption{
client.InNamespace(cluster.Namespace),
client.MatchingLabels(map[string]string{clusterv1.MachineClusterLabelName: cluster.Name}),
}

machineDeployments := &clusterv1.MachineDeploymentList{}
if err := r.Client.List(ctx, machineDeployments, listOptions...); err != nil {
return nil, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", cluster.Namespace, cluster.Name)
if err := r.Client.List(ctx, &descendants.machineDeployments, listOptions...); err != nil {
return descendants, errors.Wrapf(err, "failed to list MachineDeployments for cluster %s/%s", cluster.Namespace, cluster.Name)
}

machineSets := &clusterv1.MachineSetList{}
if err := r.Client.List(ctx, machineSets, listOptions...); err != nil {
return nil, errors.Wrapf(err, "failed to list MachineSets for cluster %s/%s", cluster.Namespace, cluster.Name)
if err := r.Client.List(ctx, &descendants.machineSets, listOptions...); err != nil {
return descendants, errors.Wrapf(err, "failed to list MachineSets for cluster %s/%s", cluster.Namespace, cluster.Name)
}

allMachines := &clusterv1.MachineList{}
if err := r.Client.List(ctx, allMachines, listOptions...); err != nil {
return nil, errors.Wrapf(err, "failed to list Machines for cluster %s/%s", cluster.Namespace, cluster.Name)
var machines clusterv1.MachineList
if err := r.Client.List(ctx, &machines, listOptions...); err != nil {
return descendants, errors.Wrapf(err, "failed to list Machines for cluster %s/%s", cluster.Namespace, cluster.Name)
}
controlPlaneMachines, machines := splitMachineList(allMachines)

var children []runtime.Object
// Split machines into control plane and worker machines so we make sure we delete control plane machines last
controlPlaneMachines, workerMachines := splitMachineList(&machines)
descendants.controlPlaneMachines = *controlPlaneMachines
descendants.workerMachines = *workerMachines

return descendants, nil
}

// filterOwnedDescendants returns an array of runtime.Objects containing only those descendants that have the cluster
// as an owner reference, with control plane machines sorted last.
func (c clusterDescendants) filterOwnedDescendants(cluster *clusterv1.Cluster) ([]runtime.Object, error) {
var ownedDescendants []runtime.Object
eachFunc := func(o runtime.Object) error {
acc, err := meta.Accessor(o)
if err != nil {
klog.Errorf("Cluster %s/%s: couldn't create accessor for type %T: %v", cluster.Namespace, cluster.Name, o, err)
return nil
}

if util.PointsTo(acc.GetOwnerReferences(), &cluster.ObjectMeta) {
children = append(children, o)
ownedDescendants = append(ownedDescendants, o)
}

return nil
}

lists := []runtime.Object{
machineDeployments,
machineSets,
machines,
controlPlaneMachines,
&c.machineDeployments,
&c.machineSets,
&c.workerMachines,
&c.controlPlaneMachines,
}
for _, list := range lists {
if err := meta.EachListItem(list, eachFunc); err != nil {
return nil, errors.Wrapf(err, "error finding children of cluster %s/%s", cluster.Namespace, cluster.Name)
return nil, errors.Wrapf(err, "error finding owned descendants of cluster %s/%s", cluster.Namespace, cluster.Name)
}
}

return children, nil
return ownedDescendants, nil
}

// splitMachineList separates the machines running the control plane from other worker nodes.
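
For readers less familiar with owner references, here is a small stand-alone illustration of the check that filterOwnedDescendants delegates to util.PointsTo: an object counts as a direct descendant when one of its ownerReferences points back at the Cluster. isOwnedBy is a hypothetical helper matching on name and UID and may differ in detail from the real util.PointsTo:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// isOwnedBy reports whether any owner reference points at the given object
// (matched here by name and UID).
func isOwnedBy(refs []metav1.OwnerReference, owner metav1.ObjectMeta) bool {
	for _, ref := range refs {
		if ref.Name == owner.Name && ref.UID == owner.UID {
			return true
		}
	}
	return false
}

func main() {
	cluster := metav1.ObjectMeta{Name: "c", UID: "1234"}
	machineSet := metav1.ObjectMeta{
		Name: "ms-1",
		OwnerReferences: []metav1.OwnerReference{
			{APIVersion: "cluster.x-k8s.io/v1alpha2", Kind: "Cluster", Name: "c", UID: "1234"},
		},
	}
	fmt.Println(isOwnedBy(machineSet.OwnerReferences, cluster)) // true
}

Descendants that fail this check (such as the md1/ms1/m1 objects in the test below) are still counted when deciding whether to requeue, but are left for their own owners to clean up rather than deleted by the cluster controller.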
162 changes: 162 additions & 0 deletions controllers/cluster_controller_test.go
@@ -24,6 +24,7 @@ import (
. "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/pointer"
clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha2"
"sigs.k8s.io/cluster-api/util/patch"
@@ -414,3 +415,164 @@ func TestClusterReconciler_machineToCluster(t *testing.T) {
})
}
}

type machineDeploymentBuilder struct {
md clusterv1.MachineDeployment
}

func newMachineDeploymentBuilder() *machineDeploymentBuilder {
return &machineDeploymentBuilder{}
}

func (b *machineDeploymentBuilder) named(name string) *machineDeploymentBuilder {
b.md.Name = name
return b
}

func (b *machineDeploymentBuilder) ownedBy(c *clusterv1.Cluster) *machineDeploymentBuilder {
b.md.OwnerReferences = append(b.md.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: c.Name,
})
return b
}

func (b *machineDeploymentBuilder) build() clusterv1.MachineDeployment {
return b.md
}

type machineSetBuilder struct {
ms clusterv1.MachineSet
}

func newMachineSetBuilder() *machineSetBuilder {
return &machineSetBuilder{}
}

func (b *machineSetBuilder) named(name string) *machineSetBuilder {
b.ms.Name = name
return b
}

func (b *machineSetBuilder) ownedBy(c *clusterv1.Cluster) *machineSetBuilder {
b.ms.OwnerReferences = append(b.ms.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: c.Name,
})
return b
}

func (b *machineSetBuilder) build() clusterv1.MachineSet {
return b.ms
}

type machineBuilder struct {
m clusterv1.Machine
}

func newMachineBuilder() *machineBuilder {
return &machineBuilder{}
}

func (b *machineBuilder) named(name string) *machineBuilder {
b.m.Name = name
return b
}

func (b *machineBuilder) ownedBy(c *clusterv1.Cluster) *machineBuilder {
b.m.OwnerReferences = append(b.m.OwnerReferences, metav1.OwnerReference{
APIVersion: clusterv1.GroupVersion.String(),
Kind: "Cluster",
Name: c.Name,
})
return b
}

func (b *machineBuilder) controlPlane() *machineBuilder {
b.m.Labels = map[string]string{clusterv1.MachineControlPlaneLabelName: "true"}
return b
}

func (b *machineBuilder) build() clusterv1.Machine {
return b.m
}

func TestFilterOwnedDescendants(t *testing.T) {
c := clusterv1.Cluster{
ObjectMeta: metav1.ObjectMeta{
Name: "c",
},
}

md1NotOwnedByCluster := newMachineDeploymentBuilder().named("md1").build()
md2OwnedByCluster := newMachineDeploymentBuilder().named("md2").ownedBy(&c).build()
md3NotOwnedByCluster := newMachineDeploymentBuilder().named("md3").build()
md4OwnedByCluster := newMachineDeploymentBuilder().named("md4").ownedBy(&c).build()

ms1NotOwnedByCluster := newMachineSetBuilder().named("ms1").build()
ms2OwnedByCluster := newMachineSetBuilder().named("ms2").ownedBy(&c).build()
ms3NotOwnedByCluster := newMachineSetBuilder().named("ms3").build()
ms4OwnedByCluster := newMachineSetBuilder().named("ms4").ownedBy(&c).build()

m1NotOwnedByCluster := newMachineBuilder().named("m1").build()
m2OwnedByCluster := newMachineBuilder().named("m2").ownedBy(&c).build()
m3ControlPlaneOwnedByCluster := newMachineBuilder().named("m3").ownedBy(&c).controlPlane().build()
m4NotOwnedByCluster := newMachineBuilder().named("m4").build()
m5OwnedByCluster := newMachineBuilder().named("m5").ownedBy(&c).build()
m6ControlPlaneOwnedByCluster := newMachineBuilder().named("m6").ownedBy(&c).controlPlane().build()

d := clusterDescendants{
machineDeployments: clusterv1.MachineDeploymentList{
Items: []clusterv1.MachineDeployment{
md1NotOwnedByCluster,
md2OwnedByCluster,
md3NotOwnedByCluster,
md4OwnedByCluster,
},
},
machineSets: clusterv1.MachineSetList{
Items: []clusterv1.MachineSet{
ms1NotOwnedByCluster,
ms2OwnedByCluster,
ms3NotOwnedByCluster,
ms4OwnedByCluster,
},
},
controlPlaneMachines: clusterv1.MachineList{
Items: []clusterv1.Machine{
m3ControlPlaneOwnedByCluster,
m6ControlPlaneOwnedByCluster,
},
},
workerMachines: clusterv1.MachineList{
Items: []clusterv1.Machine{
m1NotOwnedByCluster,
m2OwnedByCluster,
m4NotOwnedByCluster,
m5OwnedByCluster,
},
},
}

actual, err := d.filterOwnedDescendants(&c)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}

expected := []runtime.Object{
&md2OwnedByCluster,
&md4OwnedByCluster,
&ms2OwnedByCluster,
&ms4OwnedByCluster,
&m2OwnedByCluster,
&m5OwnedByCluster,
&m3ControlPlaneOwnedByCluster,
&m6ControlPlaneOwnedByCluster,
}

if !reflect.DeepEqual(expected, actual) {
t.Errorf("expected %v, got %v", expected, actual)
}
}
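
The test relies on the machines already being split into control plane and worker lists. splitMachineList's body is not part of this diff, but its comment and the test's controlPlane() builder (which sets clusterv1.MachineControlPlaneLabelName) suggest a partition along the following lines; the label value below is an assumption:

package main

import "fmt"

// controlPlaneLabel stands in for clusterv1.MachineControlPlaneLabelName
// (assumed value; only the presence of the key matters in this sketch).
const controlPlaneLabel = "cluster.x-k8s.io/control-plane"

type machine struct {
	name   string
	labels map[string]string
}

// splitMachines partitions machines into control plane and worker machines,
// so the control plane can be handled last during deletion.
func splitMachines(in []machine) (controlPlane, workers []machine) {
	for _, m := range in {
		if _, ok := m.labels[controlPlaneLabel]; ok {
			controlPlane = append(controlPlane, m)
		} else {
			workers = append(workers, m)
		}
	}
	return controlPlane, workers
}

func main() {
	cp, workers := splitMachines([]machine{
		{name: "m1"},
		{name: "m2", labels: map[string]string{controlPlaneLabel: "true"}},
	})
	fmt.Println(len(cp), len(workers)) // 1 1
}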
