✨ Add EMLB deletion #771

Merged
merged 10 commits on Jun 18, 2024
3 changes: 3 additions & 0 deletions api/v1beta1/packetcluster_types.go
@@ -22,6 +22,9 @@ import (
)

const (
// ClusterFinalizer allows PacketClusterReconciler to clean up resources associated with PacketCluster before
// removing it from the apiserver.
ClusterFinalizer = "packetcluster.infrastructure.cluster.x-k8s.io"
// NetworkInfrastructureReadyCondition reports of current status of cluster infrastructure.
NetworkInfrastructureReadyCondition clusterv1.ConditionType = "NetworkInfrastructureReady"
)
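For context, the lifecycle this finalizer enables looks roughly like the sketch below. This is illustrative only, not code from this PR; cleanup is a placeholder for the provider-specific teardown (EMLB deletion in this change), and persisting the change is left to the scope's deferred patch:

    // reconcileWithFinalizer is an illustrative sketch (not code from this PR)
    // of the lifecycle ClusterFinalizer enables; cleanup is a placeholder.
    func (r *PacketClusterReconciler) reconcileWithFinalizer(ctx context.Context, obj *infrav1.PacketCluster) (ctrl.Result, error) {
        if obj.DeletionTimestamp.IsZero() {
            // Live object: ensure the finalizer is set so the apiserver blocks
            // deletion until cleanup has run.
            if !controllerutil.ContainsFinalizer(obj, infrav1.ClusterFinalizer) {
                controllerutil.AddFinalizer(obj, infrav1.ClusterFinalizer)
            }
            return ctrl.Result{}, nil
        }
        // Deleting object: clean up external resources first, then release the
        // finalizer so the object can actually be removed.
        if err := cleanup(ctx, obj); err != nil {
            return ctrl.Result{}, err
        }
        controllerutil.RemoveFinalizer(obj, infrav1.ClusterFinalizer)
        return ctrl.Result{}, nil
    }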
31 changes: 28 additions & 3 deletions controllers/packetcluster_controller.go
@@ -20,6 +20,7 @@ package controllers
import (
"context"
"errors"
"fmt"

apierrors "k8s.io/apimachinery/pkg/api/errors"
clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1"
@@ -31,6 +32,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/controller"
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
"sigs.k8s.io/controller-runtime/pkg/handler"

infrav1 "sigs.k8s.io/cluster-api-provider-packet/api/v1beta1"
@@ -102,7 +104,14 @@ func (r *PacketClusterReconciler) Reconcile(ctx context.Context, req ctrl.Reques

// Handle deleted clusters
if !cluster.DeletionTimestamp.IsZero() {
return r.reconcileDelete(ctx, clusterScope)
return ctrl.Result{}, r.reconcileDelete(ctx, clusterScope)
}

// Add finalizer first if not set to avoid the race condition between init and delete.
// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
if !controllerutil.ContainsFinalizer(packetcluster, infrav1.ClusterFinalizer) {
controllerutil.AddFinalizer(packetcluster, infrav1.ClusterFinalizer)
return ctrl.Result{}, nil
}

err = r.reconcileNormal(ctx, clusterScope)
@@ -177,12 +186,28 @@ func (r *PacketClusterReconciler) reconcileNormal(ctx context.Context, clusterSc
return nil
}

func (r *PacketClusterReconciler) reconcileDelete(_ context.Context, _ *scope.ClusterScope) (ctrl.Result, error) {
func (r *PacketClusterReconciler) reconcileDelete(ctx context.Context, clusterScope *scope.ClusterScope) error {
log := ctrl.LoggerFrom(ctx).WithValues("cluster", clusterScope.Cluster.Name)
log.Info("Reconciling PacketCluster Deletion")

packetCluster := clusterScope.PacketCluster

if packetCluster.Spec.VIPManager == emlb.EMLBVIPID {
// Create new EMLB object
lb := emlb.NewEMLB(r.PacketClient.GetConfig().DefaultHeader["X-Auth-Token"], packetCluster.Spec.ProjectID, packetCluster.Spec.Metro)

if err := lb.DeleteLoadBalancer(ctx, clusterScope); err != nil {
return fmt.Errorf("failed to delete load balancer: %w", err)
}
}
// Initially I created this handler to remove an elastic IP when a cluster
// gets deleted, but it did not seem like a good idea. It is better to leave
// users the ability to decide whether they want to keep and reassign the IP
// or whether they no longer need it.
return ctrl.Result{}, nil

// Cluster is deleted so remove the finalizer.
controllerutil.RemoveFinalizer(packetCluster, infrav1.ClusterFinalizer)
return nil
}

func (r *PacketClusterReconciler) SetupWithManager(ctx context.Context, mgr ctrl.Manager, options controller.Options) error {
24 changes: 17 additions & 7 deletions controllers/packetmachine_controller.go
@@ -164,6 +164,13 @@ func (r *PacketMachineReconciler) Reconcile(ctx context.Context, req ctrl.Reques
}
}()

// Add finalizer first if not set to avoid the race condition between init and delete.
// Note: Finalizers in general can only be added when the deletionTimestamp is not set.
if packetmachine.ObjectMeta.DeletionTimestamp.IsZero() && !controllerutil.ContainsFinalizer(packetmachine, infrav1.MachineFinalizer) {
controllerutil.AddFinalizer(packetmachine, infrav1.MachineFinalizer)
return ctrl.Result{}, nil
}

// Handle deleted machines
if !packetmachine.ObjectMeta.DeletionTimestamp.IsZero() {
err = r.reconcileDelete(ctx, machineScope)
@@ -262,12 +269,6 @@ func (r *PacketMachineReconciler) reconcile(ctx context.Context, machineScope *s
return ctrl.Result{}, nil
}

// If the PacketMachine doesn't have our finalizer, add it.
controllerutil.AddFinalizer(packetmachine, infrav1.MachineFinalizer)
if err := machineScope.PatchObject(ctx); err != nil {
log.Error(err, "unable to patch object")
}

if !machineScope.Cluster.Status.InfrastructureReady {
log.Info("Cluster infrastructure is not ready yet")
conditions.MarkFalse(machineScope.PacketMachine, infrav1.DeviceReadyCondition, infrav1.WaitingForClusterInfrastructureReason, clusterv1.ConditionSeverityInfo, "")
@@ -536,12 +537,21 @@ func (r *PacketMachineReconciler) reconcileDelete(ctx context.Context, machineSc
device = dev
}

// We should never get there but this is a safetly check
// We should never get there but this is a safety check
if device == nil {
controllerutil.RemoveFinalizer(packetmachine, infrav1.MachineFinalizer)
return fmt.Errorf("%w: %s", errMissingDevice, packetmachine.Name)
}

if machineScope.PacketCluster.Spec.VIPManager == emlb.EMLBVIPID {
// Create new EMLB object
lb := emlb.NewEMLB(r.PacketClient.GetConfig().DefaultHeader["X-Auth-Token"], machineScope.PacketCluster.Spec.ProjectID, packetmachine.Spec.Metro)

if err := lb.DeleteLoadBalancerOrigin(ctx, machineScope); err != nil {
return fmt.Errorf("failed to delete load balancer origin: %w", err)
}
}

apiRequest := r.PacketClient.DevicesApi.DeleteDevice(ctx, device.GetId()).ForceDelete(force)
if _, err := apiRequest.Execute(); err != nil { //nolint:bodyclose // see https://github.com/timakin/bodyclose/issues/42
return fmt.Errorf("failed to delete the machine: %w", err)
75 changes: 69 additions & 6 deletions internal/emlb/emlb.go
@@ -99,7 +99,7 @@ func NewEMLB(metalAPIKey, projectID, metro string) *EMLB {
return manager
}

// ReconcileLoadBalancer creates a new Equinix Metal Load Balancer.
// ReconcileLoadBalancer creates a new Equinix Metal Load Balancer and associates it with the given ClusterScope.
func (e *EMLB) ReconcileLoadBalancer(ctx context.Context, clusterScope *scope.ClusterScope) error {
log := ctrl.LoggerFrom(ctx)

@@ -156,7 +156,7 @@ func (e *EMLB) ReconcileVIPOrigin(ctx context.Context, machineScope *scope.Machi
}

// Fetch the Load Balancer object.
lb, err := e.getLoadBalancer(ctx, lbID)
lb, _, err := e.getLoadBalancer(ctx, lbID)
if err != nil {
return err
}
@@ -233,12 +233,65 @@ func (e *EMLB) ReconcileVIPOrigin(ctx context.Context, machineScope *scope.Machi
return nil
}

// DeleteLoadBalancer deletes the Equinix Metal Load Balancer associated with a given ClusterScope.
func (e *EMLB) DeleteLoadBalancer(ctx context.Context, clusterScope *scope.ClusterScope) error {
log := ctrl.LoggerFrom(ctx)

packetCluster := clusterScope.PacketCluster
clusterName := packetCluster.Name

// Make sure the cluster already has an EMLB ID in its packetCluster annotations, otherwise abort.
lbID, exists := packetCluster.Annotations[loadBalancerIDAnnotation]
if !exists || (lbID == "") {
log.Info("no Equinix Metal Load Balancer found in cluster's annotations, skipping EMLB delete")
return nil
}

log.Info("Deleting EMLB", "Cluster Metro", e.metro, "Cluster Name", clusterName, "Project ID", e.projectID, "Load Balancer ID", lbID)

resp, err := e.deleteLoadBalancer(ctx, lbID)
if err != nil {
if resp.StatusCode == http.StatusNotFound {
return nil
}
log.Error(err, "LB Delete Failed", "EMLB ID", lbID, "Response Body", resp.Body)
}

return err
}

// DeleteLoadBalancerOrigin deletes the Equinix Metal Load Balancer pool associated with a given MachineScope.
func (e *EMLB) DeleteLoadBalancerOrigin(ctx context.Context, machineScope *scope.MachineScope) error {
// Initially we create a single pool per origin; the logic below needs to be updated if we move to a shared load balancer pool model.
log := ctrl.LoggerFrom(ctx)

clusterName := machineScope.Cluster.Name

// Make sure the machine has an EMLB Pool ID in its packetMachine annotations, otherwise abort.
lbPoolID, exists := machineScope.PacketMachine.Annotations[loadBalancerPoolIDAnnotation]
if !exists || (lbPoolID == "") {
return fmt.Errorf("no Equinix Metal Load Balancer Pool found in machine's annotations")
}

log.Info("Deleting EMLB Pool", "Cluster Metro", e.metro, "Cluster Name", clusterName, "Project ID", e.projectID, "Pool ID", lbPoolID)

resp, err := e.deletePool(ctx, lbPoolID)
if err != nil {
if resp.StatusCode == http.StatusNotFound {
return nil
}
log.Error(err, "LB Pool Delete Failed", "Pool ID", lbPoolID, "Response Body", resp.Body)
}

return err
}

// getLoadBalancer returns a Load Balancer object and the raw HTTP response, given an id.
func (e *EMLB) getLoadBalancer(ctx context.Context, id string) (*lbaas.LoadBalancer, error) {
func (e *EMLB) getLoadBalancer(ctx context.Context, id string) (*lbaas.LoadBalancer, *http.Response, error) {
ctx = context.WithValue(ctx, lbaas.ContextOAuth2, e.tokenExchanger)

LoadBalancer, _, err := e.client.LoadBalancersApi.GetLoadBalancer(ctx, id).Execute()
return LoadBalancer, err
LoadBalancer, resp, err := e.client.LoadBalancersApi.GetLoadBalancer(ctx, id).Execute()
return LoadBalancer, resp, err
}
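With the extra *http.Response return value, callers can branch on the HTTP status code. A minimal usage sketch, mirroring the 404-as-already-deleted handling in DeleteLoadBalancer above (fetchOrSkip is a hypothetical helper, not part of this PR):

    // fetchOrSkip returns (nil, nil) when the load balancer is already gone,
    // so callers can treat a 404 as successful cleanup.
    func (e *EMLB) fetchOrSkip(ctx context.Context, lbID string) (*lbaas.LoadBalancer, error) {
        lb, resp, err := e.getLoadBalancer(ctx, lbID)
        if err != nil {
            if resp != nil && resp.StatusCode == http.StatusNotFound {
                return nil, nil // already deleted; nothing to do
            }
            return nil, err
        }
        return lb, nil
    }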

// getLoadBalancerPort Returns a Load Balancer Port object given an id.
@@ -350,7 +403,7 @@ func (e *EMLB) ensureLoadBalancer(ctx context.Context, lbID, lbname string, port
}

// Regardless of whether we just created it, fetch the loadbalancer object.
lb, err := e.getLoadBalancer(ctx, lbID)
lb, _, err := e.getLoadBalancer(ctx, lbID)
if err != nil {
return nil, nil, err
}
@@ -401,6 +454,16 @@ func (e *EMLB) createOrigin(ctx context.Context, poolID, originName string, targ
return e.client.PoolsApi.CreateLoadBalancerPoolOrigin(ctx, poolID).LoadBalancerPoolOriginCreate(createOriginRequest).Execute()
}

func (e *EMLB) deleteLoadBalancer(ctx context.Context, lbID string) (*http.Response, error) {
ctx = context.WithValue(ctx, lbaas.ContextOAuth2, e.tokenExchanger)
return e.client.LoadBalancersApi.DeleteLoadBalancer(ctx, lbID).Execute()
}

func (e *EMLB) deletePool(ctx context.Context, poolID string) (*http.Response, error) {
ctx = context.WithValue(ctx, lbaas.ContextOAuth2, e.tokenExchanger)
return e.client.PoolsApi.DeleteLoadBalancerPool(ctx, poolID).Execute()
}
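Together with the annotation checks above, these helpers make EMLB deletion idempotent: the HTTP 404 from a repeat delete is swallowed and nil is returned. A sketch of the intended caller behavior (deleteClusterLB is a hypothetical wrapper; apiKey, projectID, metro, and clusterScope are assumed inputs):

    // deleteClusterLB shows that retried reconciles may call
    // DeleteLoadBalancer repeatedly without error.
    func deleteClusterLB(ctx context.Context, apiKey, projectID, metro string, clusterScope *scope.ClusterScope) error {
        e := emlb.NewEMLB(apiKey, projectID, metro)
        // First call deletes the load balancer recorded in the cluster's annotations.
        if err := e.DeleteLoadBalancer(ctx, clusterScope); err != nil {
            return err
        }
        // A second call finds the load balancer gone; the API's 404 is treated
        // as success, so requeued deletes are safe.
        return e.DeleteLoadBalancer(ctx, clusterScope)
    }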

func (e *EMLB) updateListenerPort(ctx context.Context, poolID, lbPortID string) (*lbaas.LoadBalancerPort, error) {
ctx = context.WithValue(ctx, lbaas.ContextOAuth2, e.tokenExchanger)
