Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

✨ Implement delete for KubeadmControlPlane #2037

Merged
merged 3 commits into from
Jan 17, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
107 changes: 74 additions & 33 deletions controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -31,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
kerrors "k8s.io/apimachinery/pkg/util/errors"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apiserver/pkg/storage/names"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
Expand All @@ -55,6 +54,12 @@ import (
"sigs.k8s.io/cluster-api/util/secret"
)

const (
	// DeleteRequeueAfter is how long to wait before checking again to see if
	// all control plane machines have been deleted.
	DeleteRequeueAfter = 30 * time.Second
)

// +kubebuilder:rbac:groups=core,resources=events,verbs=get;list;watch;create;patch
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;patch
// +kubebuilder:rbac:groups=infrastructure.cluster.x-k8s.io;bootstrap.cluster.x-k8s.io;controlplane.cluster.x-k8s.io,resources=*,verbs=get;list;watch;create;update;patch;delete
Expand Down Expand Up @@ -165,15 +170,12 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, kcp *cont
}

// TODO: handle proper adoption of Machines
ownedMachines, err := r.getOwnedMachines(
ctx,
kcp,
types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Name},
)
allMachines, err := r.getMachines(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Name})
if err != nil {
logger.Error(err, "failed to get list of owned machines")
logger.Error(err, "Failed to get list of machines")
dlipovetsky marked this conversation as resolved.
Show resolved Hide resolved
return ctrl.Result{}, err
}
ownedMachines := r.filterOwnedMachines(kcp, allMachines)

// Always attempt to update status
defer func() {
Expand Down Expand Up @@ -228,7 +230,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, kcp *cont

// Currently we are not handling upgrade, so treat all owned machines as one for now.
// Once we start handling upgrade, we'll need to filter this list and act appropriately
numMachines := len(ownedMachines)
numMachines := len(ownedMachines.Items)
desiredReplicas := int(*kcp.Spec.Replicas)
switch {
// We are creating the first replica
Expand Down Expand Up @@ -272,16 +274,13 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
// This is necessary for CRDs including scale subresources.
kcp.Status.Selector = selector.String()

ownedMachines, err := r.getOwnedMachines(
ctx,
kcp,
types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Name},
)
allMachines, err := r.getMachines(ctx, types.NamespacedName{Namespace: cluster.Namespace, Name: cluster.Name})
if err != nil {
return errors.Wrap(err, "failed to get list of owned machines")
}
ownedMachines := r.filterOwnedMachines(kcp, allMachines)

replicas := int32(len(ownedMachines))
replicas := int32(len(ownedMachines.Items))
// TODO: take into account configuration hash once upgrades are in place
kcp.Status.Replicas = replicas

Expand All @@ -291,8 +290,8 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
}

readyMachines := int32(0)
for _, m := range ownedMachines {

for i := range ownedMachines.Items {
m := &ownedMachines.Items[i]
dlipovetsky marked this conversation as resolved.
Show resolved Hide resolved
node, err := getMachineNode(ctx, remoteClient, m)
if err != nil {
return errors.Wrap(err, "failed to get referenced Node")
Expand Down Expand Up @@ -330,7 +329,7 @@ func (r *KubeadmControlPlaneReconciler) scaleUpControlPlane(ctx context.Context,
}
}

return utilerrors.NewAggregate(errs)
return kerrors.NewAggregate(errs)
}

func (r *KubeadmControlPlaneReconciler) initializeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane) error {
Expand Down Expand Up @@ -377,7 +376,7 @@ func (r *KubeadmControlPlaneReconciler) cloneConfigsAndGenerateMachine(ctx conte
errs = append(errs, errors.Wrap(err, "failed to cleanup generated resources"))
}

return utilerrors.NewAggregate(errs)
return kerrors.NewAggregate(errs)
}

return nil
Expand All @@ -400,7 +399,7 @@ func (r *KubeadmControlPlaneReconciler) cleanupFromGeneration(ctx context.Contex
}
}

return utilerrors.NewAggregate(errs)
return kerrors.NewAggregate(errs)
}

func (r *KubeadmControlPlaneReconciler) generateKubeadmConfig(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, cluster *clusterv1.Cluster, spec *bootstrapv1.KubeadmConfigSpec) (*corev1.ObjectReference, error) {
Expand Down Expand Up @@ -475,16 +474,63 @@ func generateKubeadmControlPlaneLabels(clusterName string) map[string]string {
}

// reconcileDelete handles KubeadmControlPlane deletion.
func (r *KubeadmControlPlaneReconciler) reconcileDelete(_ context.Context, kcp *controlplanev1.KubeadmControlPlane, logger logr.Logger) (ctrl.Result, error) {
err := errors.New("Not Implemented")
// The implementation does not take non-control plane workloads into
// consideration. This may or may not change in the future. Please see
// https://github.com/kubernetes-sigs/cluster-api/issues/2064
func (r *KubeadmControlPlaneReconciler) reconcileDelete(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, logger logr.Logger) (_ ctrl.Result, reterr error) {
// Fetch the Cluster.
cluster, err := util.GetOwnerCluster(ctx, r.Client, kcp.ObjectMeta)
dlipovetsky marked this conversation as resolved.
Show resolved Hide resolved
if err != nil {
logger.Error(err, "Failed to retrieve owner Cluster from the API Server")
return ctrl.Result{}, err
}
if cluster == nil {
logger.Info("Cluster Controller has not yet set OwnerRef")
return ctrl.Result{}, nil
}
logger = logger.WithValues("cluster", cluster.Name)

// Fetch Machines
allMachines, err := util.GetMachinesForCluster(ctx, r.Client, cluster)
if err != nil {
logger.Error(err, "Not Implemented")
logger.Error(err, "Failed to get list of machines")
return ctrl.Result{}, err
}
ownedMachines := r.filterOwnedMachines(kcp, allMachines)

controllerutil.RemoveFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
return ctrl.Result{}, nil
// Always attempt to update status
defer func() {
if err := r.updateStatus(ctx, kcp, cluster); err != nil {
logger.Error(err, "Failed to update status")
reterr = kerrors.NewAggregate([]error{reterr, err})
}
}()
dlipovetsky marked this conversation as resolved.
Show resolved Hide resolved

// Verify that only control plane machines remain
if len(allMachines.Items) != len(ownedMachines.Items) {
err := errors.New("at least one machine is not owned by the control plane")
logger.Error(err, "Failed to delete the control plane")
return ctrl.Result{}, err
}

// If no control plane machines remain, remove the finalizer
if len(ownedMachines.Items) == 0 {
controllerutil.RemoveFinalizer(kcp, controlplanev1.KubeadmControlPlaneFinalizer)
return ctrl.Result{}, nil
}

// Delete control plane machines in parallel
var errs []error
for i := range ownedMachines.Items {
m := &ownedMachines.Items[i]
if err := r.Client.Delete(ctx, m); err != nil && !apierrors.IsNotFound(err) {
dlipovetsky marked this conversation as resolved.
Show resolved Hide resolved
errs = append(errs, errors.Wrap(err, "failed to cleanup owned machines"))
}
}
if errs != nil {
return ctrl.Result{}, kerrors.NewAggregate(errs)
}
return ctrl.Result{RequeueAfter: DeleteRequeueAfter}, nil
}

func (r *KubeadmControlPlaneReconciler) reconcileKubeconfig(ctx context.Context, clusterName types.NamespacedName, endpoint clusterv1.APIEndpoint, kcp *controlplanev1.KubeadmControlPlane) error {
Expand Down Expand Up @@ -526,22 +572,17 @@ func (r *KubeadmControlPlaneReconciler) getMachines(ctx context.Context, cluster
return allMachines, nil
}

func (r *KubeadmControlPlaneReconciler) getOwnedMachines(ctx context.Context, kcp *controlplanev1.KubeadmControlPlane, clusterName types.NamespacedName) ([]*clusterv1.Machine, error) {
allMachines, err := r.getMachines(ctx, clusterName)
if err != nil {
return nil, err
}

var ownedMachines []*clusterv1.Machine
func (r *KubeadmControlPlaneReconciler) filterOwnedMachines(kcp *controlplanev1.KubeadmControlPlane, allMachines *clusterv1.MachineList) *clusterv1.MachineList {
ownedMachines := &clusterv1.MachineList{}
for i := range allMachines.Items {
m := allMachines.Items[i]
controllerRef := metav1.GetControllerOf(&m)
if controllerRef != nil && controllerRef.Kind == "KubeadmControlPlane" && controllerRef.Name == kcp.Name {
ownedMachines = append(ownedMachines, &m)
ownedMachines.Items = append(ownedMachines.Items, m)
}
}

return ownedMachines, nil
return ownedMachines
}

func (r *KubeadmControlPlaneReconciler) reconcileExternalReference(ctx context.Context, cluster *clusterv1.Cluster, ref corev1.ObjectReference) error {
Expand Down
Loading