feat: Add LB exclusion label when deleting node
Currently, when Karpenter drains and then deletes a Node from the
cluster, if that node is registered in a Target Group for an ALB/NLB,
the corresponding EC2 instance is not deregistered first. The load
balancer can therefore keep routing traffic to the terminating
instance, leading to increased errors when deleting nodes via
Karpenter.

To help resolve this issue, this change adds the well-known
`node.kubernetes.io/exclude-from-external-balancers` label, which will
cause the AWS LB controller to remove the node from the Target Group
while Karpenter is draining the node. This is similar to how the AWS
Node Termination Handler works (see
aws/aws-node-termination-handler#316).
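For reference, `v1.LabelNodeExcludeBalancers` in k8s.io/api/core/v1
resolves to `node.kubernetes.io/exclude-from-external-balancers`, and
consumers generally key off the label's presence rather than its value
(the "karpenter" value set below only records who applied it). A
minimal sketch of a presence-based check; `eligibleForLoadBalancer` is
a hypothetical helper, not the AWS LB controller's actual code:

	import v1 "k8s.io/api/core/v1"

	// eligibleForLoadBalancer reports whether a node may be registered as a
	// load balancer target. Presence of the exclusion label is what matters,
	// not its value.
	func eligibleForLoadBalancer(node *v1.Node) bool {
		_, excluded := node.Labels[v1.LabelNodeExcludeBalancers]
		return !excluded
	}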

In the future, Karpenter might be enhanced to wait for a configurable
period before deleting the Node and terminating the associated
instance, since there is currently a race between Pods being drained
off of the Node and the EC2 instance being removed from the target
group.
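As a rough sketch of what such a wait might look like using the AWS SDK
for Go v1 (the `waitForDeregistration` helper, its signature, and the
5-second poll interval are all illustrative, not part of this commit):

	import (
		"context"
		"time"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/elbv2"
		"github.com/aws/aws-sdk-go/service/elbv2/elbv2iface"
	)

	// waitForDeregistration polls the target group until the instance is no
	// longer draining (or the context expires) before the caller terminates it.
	func waitForDeregistration(ctx context.Context, elb elbv2iface.ELBV2API, targetGroupARN, instanceID string) error {
		for {
			out, err := elb.DescribeTargetHealthWithContext(ctx, &elbv2.DescribeTargetHealthInput{
				TargetGroupArn: aws.String(targetGroupARN),
				Targets:        []*elbv2.TargetDescription{{Id: aws.String(instanceID)}},
			})
			if err != nil {
				return err
			}
			draining := false
			for _, d := range out.TargetHealthDescriptions {
				if aws.StringValue(d.TargetHealth.State) == elbv2.TargetHealthStateEnumDraining {
					draining = true
				}
			}
			if !draining {
				return nil // deregistered, unused, or never registered
			}
			select {
			case <-ctx.Done():
				return ctx.Err()
			case <-time.After(5 * time.Second):
			}
		}
	}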
DWSR committed Sep 17, 2022
1 parent bb65fe1 commit 5c48d5e
Showing 2 changed files with 24 additions and 0 deletions.
19 changes: 19 additions & 0 deletions pkg/controllers/termination/suite_test.go
@@ -103,6 +103,25 @@ var _ = Describe("Termination", func() {
		ExpectReconcileSucceeded(ctx, controller, client.ObjectKeyFromObject(node))
		ExpectNotFound(ctx, env.Client, node)
	})
It("should exlude nodes from load balancers when terminating", func() {
// This is a kludge to prevent the node from being deleted before we can
// inspect its labels
podNoEvict := test.Pod(test.PodOptions{
NodeName: node.Name,
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{v1alpha5.DoNotEvictPodAnnotationKey: "true"},
OwnerReferences: defaultOwnerRefs,
},
})

ExpectApplied(ctx, env.Client, node, podNoEvict)

Expect(env.Client.Delete(ctx, node)).To(Succeed())
node = ExpectNodeExists(ctx, env.Client, node.Name)
ExpectReconcileSucceeded(ctx, controller, client.ObjectKeyFromObject(node))
node = ExpectNodeExists(ctx, env.Client, node.Name)
Expect(node.Labels[v1.LabelNodeExcludeBalancers]).Should(Equal("karpenter"))
})
It("should not evict pods that tolerate unschedulable taint", func() {
podEvict := test.Pod(test.PodOptions{NodeName: node.Name, ObjectMeta: metav1.ObjectMeta{OwnerReferences: defaultOwnerRefs}})
podSkip := test.Pod(test.PodOptions{
5 changes: 5 additions & 0 deletions pkg/controllers/termination/terminate.go
@@ -61,6 +61,11 @@ func (t *Terminator) cordon(ctx context.Context, node *v1.Node) error {
	// 2. Cordon node
	persisted := node.DeepCopy()
	node.Spec.Unschedulable = true
	// Initialize the label map if it's nil before adding the exclusion label
	if node.Labels == nil {
		node.Labels = map[string]string{}
	}
	node.Labels[v1.LabelNodeExcludeBalancers] = "karpenter"
	if err := t.KubeClient.Patch(ctx, node, client.MergeFrom(persisted)); err != nil {
		return fmt.Errorf("patching node %s, %w", node.Name, err)
	}
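On the consumer side, a controller reacting to this label (as the AWS
LB controller does for its target group bindings) would typically
filter Node update events on label transitions. A minimal
controller-runtime predicate sketch, not taken from the AWS LB
controller's source:

	import (
		v1 "k8s.io/api/core/v1"
		"sigs.k8s.io/controller-runtime/pkg/event"
		"sigs.k8s.io/controller-runtime/pkg/predicate"
	)

	// excludeLabelChanged triggers reconciliation whenever the exclusion
	// label is added to or removed from a node.
	var excludeLabelChanged = predicate.Funcs{
		UpdateFunc: func(e event.UpdateEvent) bool {
			_, oldExcluded := e.ObjectOld.GetLabels()[v1.LabelNodeExcludeBalancers]
			_, newExcluded := e.ObjectNew.GetLabels()[v1.LabelNodeExcludeBalancers]
			return oldExcluded != newExcluded
		},
	}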
