
Refactor GetPodsForDeletion logic and tests into simulator
artemvmin committed Oct 7, 2023
1 parent 119c1f9 commit d0d4456
Showing 16 changed files with 620 additions and 765 deletions.
9 changes: 3 additions & 6 deletions cluster-autoscaler/simulator/cluster.go
@@ -123,9 +123,6 @@ func (r *RemovalSimulator) FindNodesToRemove(
 	timestamp time.Time,
 	remainingPdbTracker pdb.RemainingPdbTracker,
 ) (nodesToRemove []NodeToBeRemoved, unremovableNodes []*UnremovableNode) {
-	result := make([]NodeToBeRemoved, 0)
-	unremovable := make([]*UnremovableNode, 0)
-
 	destinationMap := make(map[string]bool, len(destinations))
 	for _, destination := range destinations {
 		destinationMap[destination] = true
@@ -134,12 +131,12 @@ func (r *RemovalSimulator) FindNodesToRemove(
 	for _, nodeName := range candidates {
 		rn, urn := r.SimulateNodeRemoval(nodeName, destinationMap, timestamp, remainingPdbTracker)
 		if rn != nil {
-			result = append(result, *rn)
+			nodesToRemove = append(nodesToRemove, *rn)
 		} else if urn != nil {
-			unremovable = append(unremovable, urn)
+			unremovableNodes = append(unremovableNodes, urn)
 		}
 	}
-	return result, unremovable
+	return nodesToRemove, unremovableNodes
 }

 // SimulateNodeRemoval simulates removing a node from the cluster to check
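
The cluster.go change above leans on Go's named return values: nodesToRemove and unremovableNodes start out as nil slices, and append allocates on first use, so the explicit make() initializations are redundant. A minimal standalone sketch of the pattern (illustrative only, not autoscaler code):

package main

import "fmt"

// splitEvenOdd demonstrates the named-return pattern: evens and odds begin
// as nil slices, and append allocates on first use, so no make() is needed.
func splitEvenOdd(nums []int) (evens, odds []int) {
	for _, n := range nums {
		if n%2 == 0 {
			evens = append(evens, n)
		} else {
			odds = append(odds, n)
		}
	}
	return evens, odds
}

func main() {
	evens, odds := splitEvenOdd([]int{1, 2, 3, 4, 5})
	fmt.Println(evens, odds) // [2 4] [1 3 5]
}

One behavioral nuance: when nothing is appended, the function now returns nil slices rather than empty ones, which matters only to callers that distinguish nil from empty.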
28 changes: 10 additions & 18 deletions cluster-autoscaler/simulator/cluster_test.go
@@ -136,14 +136,11 @@ func TestFindNodesToRemove(t *testing.T) {
 	fullNodeInfo.AddPod(pod4)

 	emptyNodeToRemove := NodeToBeRemoved{
-		Node:             emptyNode,
-		PodsToReschedule: []*apiv1.Pod{},
-		DaemonSetPods:    []*apiv1.Pod{},
+		Node: emptyNode,
 	}
 	drainableNodeToRemove := NodeToBeRemoved{
 		Node:             drainableNode,
 		PodsToReschedule: []*apiv1.Pod{pod1, pod2},
-		DaemonSetPods:    []*apiv1.Pod{},
 	}

 	clusterSnapshot := clustersnapshot.NewBasicClusterSnapshot()
@@ -153,19 +150,16 @@

 	tests := []findNodesToRemoveTestConfig{
 		{
-			name:        "just an empty node, should be removed",
-			pods:        []*apiv1.Pod{},
-			candidates:  []string{emptyNode.Name},
-			allNodes:    []*apiv1.Node{emptyNode},
-			toRemove:    []NodeToBeRemoved{emptyNodeToRemove},
-			unremovable: []*UnremovableNode{},
+			name:       "just an empty node, should be removed",
+			candidates: []string{emptyNode.Name},
+			allNodes:   []*apiv1.Node{emptyNode},
+			toRemove:   []NodeToBeRemoved{emptyNodeToRemove},
 		},
 		{
 			name:        "just a drainable node, but nowhere for pods to go to",
 			pods:        []*apiv1.Pod{pod1, pod2},
 			candidates:  []string{drainableNode.Name},
 			allNodes:    []*apiv1.Node{drainableNode},
-			toRemove:    []NodeToBeRemoved{},
 			unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
 		},
 		{
@@ -181,16 +175,14 @@
 			pods:        []*apiv1.Pod{pod1, pod2, pod4},
 			candidates:  []string{drainableNode.Name},
 			allNodes:    []*apiv1.Node{drainableNode, fullNode},
-			toRemove:    []NodeToBeRemoved{},
 			unremovable: []*UnremovableNode{{Node: drainableNode, Reason: NoPlaceToMovePods}},
 		},
 		{
-			name:        "4 nodes, 1 empty, 1 drainable",
-			pods:        []*apiv1.Pod{pod1, pod2, pod3, pod4},
-			candidates:  []string{emptyNode.Name, drainableNode.Name},
-			allNodes:    []*apiv1.Node{emptyNode, drainableNode, fullNode, nonDrainableNode},
-			toRemove:    []NodeToBeRemoved{emptyNodeToRemove, drainableNodeToRemove},
-			unremovable: []*UnremovableNode{},
+			name:       "4 nodes, 1 empty, 1 drainable",
+			pods:       []*apiv1.Pod{pod1, pod2, pod3, pod4},
+			candidates: []string{emptyNode.Name, drainableNode.Name},
+			allNodes:   []*apiv1.Node{emptyNode, drainableNode, fullNode, nonDrainableNode},
+			toRemove:   []NodeToBeRemoved{emptyNodeToRemove, drainableNodeToRemove},
 		},
 	}

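The trimmed test cases rely on Go's zero values: a field omitted from a composite literal takes its zero value, so the dropped pods, toRemove, and unremovable entries simply become nil slices, which behave like empty ones under len, range, and append. A standalone sketch using a hypothetical config type (the field names mirror the test above, but this is not the real findNodesToRemoveTestConfig):

package main

import "fmt"

// testConfig is a hypothetical stand-in for the test's config struct.
type testConfig struct {
	name        string
	candidates  []string
	toRemove    []string
	unremovable []string
}

func main() {
	// Omitted slice fields default to nil.
	tc := testConfig{
		name:       "just an empty node, should be removed",
		candidates: []string{"empty-node"},
	}
	fmt.Println(tc.toRemove == nil, len(tc.unremovable)) // true 0
	for range tc.unremovable {
		fmt.Println("never printed: ranging over a nil slice runs zero times")
	}
}

This trimming assumes the test's comparison treats nil and empty slices alike; a strict reflect.DeepEqual would tell them apart.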
29 changes: 9 additions & 20 deletions cluster-autoscaler/simulator/drain.go
@@ -39,7 +39,6 @@ import (
 // If listers is not nil it checks whether RC, DS, Jobs and RS that created
 // these pods still exist.
 func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options.NodeDeleteOptions, drainabilityRules rules.Rules, listers kube_util.ListerRegistry, remainingPdbTracker pdb.RemainingPdbTracker, timestamp time.Time) (pods []*apiv1.Pod, daemonSetPods []*apiv1.Pod, blockingPod *drain.BlockingPod, err error) {
-	var drainPods, drainDs []*apiv1.Pod
 	if drainabilityRules == nil {
 		drainabilityRules = rules.Default(deleteOptions)
 	}
@@ -55,31 +54,21 @@ func GetPodsToMove(nodeInfo *schedulerframework.NodeInfo, deleteOptions options.
 		pod := podInfo.Pod
 		status := drainabilityRules.Drainable(drainCtx, pod)
 		switch status.Outcome {
-		case drainability.UndefinedOutcome:
-			pods = append(pods, podInfo.Pod)
-		case drainability.DrainOk:
+		case drainability.UndefinedOutcome, drainability.DrainOk:
 			if drain.IsPodLongTerminating(pod, timestamp) {
 				continue
 			}
 			if pod_util.IsDaemonSetPod(pod) {
-				drainDs = append(drainDs, pod)
+				daemonSetPods = append(daemonSetPods, pod)
 			} else {
-				drainPods = append(drainPods, pod)
+				pods = append(pods, pod)
 			}
 		case drainability.BlockDrain:
-			blockingPod = &drain.BlockingPod{
+			return nil, nil, &drain.BlockingPod{
 				Pod:    pod,
 				Reason: status.BlockingReason,
-			}
-			err = status.Error
-			return
+			}, status.Error
 		}
 	}
-
-	pods, daemonSetPods = drain.GetPodsForDeletionOnNodeDrain(
-		pods,
-		remainingPdbTracker.GetPdbs(),
-		deleteOptions.SkipNodesWithSystemPods,
-		deleteOptions.SkipNodesWithLocalStorage,
-		deleteOptions.SkipNodesWithCustomControllerPods,
-		timestamp)
-
-	return append(pods, drainPods...), append(daemonSetPods, drainDs...), nil, nil
+	return pods, daemonSetPods, nil, nil
 }
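
After this refactor, the filtering that drain.GetPodsForDeletionOnNodeDrain previously performed after the loop is folded into the drainability-rules switch itself: UndefinedOutcome and DrainOk are handled by one case, and a BlockDrain outcome returns immediately instead of assigning blockingPod and err before returning. A simplified, self-contained sketch of the resulting control flow, using stand-in types rather than the real drainability package:

package main

import (
	"errors"
	"fmt"
)

// outcome is a stand-in for the drainability outcome enum.
type outcome int

const (
	undefinedOutcome outcome = iota
	drainOK
	blockDrain
)

// pod is a stand-in for the scheduler's pod info.
type pod struct {
	name      string
	daemonSet bool
	outcome   outcome
}

// classify mirrors the shape of the refactored GetPodsToMove: one pass over
// the pods, a combined case for the drainable outcomes, and an early return
// as soon as any pod blocks the drain.
func classify(pods []pod) (movable, daemonSetPods []string, err error) {
	for _, p := range pods {
		switch p.outcome {
		case undefinedOutcome, drainOK:
			if p.daemonSet {
				daemonSetPods = append(daemonSetPods, p.name)
			} else {
				movable = append(movable, p.name)
			}
		case blockDrain:
			return nil, nil, errors.New("drain blocked by pod " + p.name)
		}
	}
	return movable, daemonSetPods, nil
}

func main() {
	movable, ds, err := classify([]pod{
		{name: "web", outcome: drainOK},
		{name: "logger", daemonSet: true, outcome: drainOK},
	})
	fmt.Println(movable, ds, err) // [web] [logger] <nil>
}

Returning early on the first blocking pod keeps the function single-pass and avoids ever returning a partially filtered pod list alongside an error.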
