Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Extract getting nodes to delete for atomic node groups #7068

Merged
merged 1 commit into from
Jul 31, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
69 changes: 32 additions & 37 deletions cluster-autoscaler/core/static_autoscaler.go
Original file line number Diff line number Diff line change
Expand Up @@ -743,7 +743,7 @@ func (a *StaticAutoscaler) removeOldUnregisteredNodes(allUnregisteredNodes []clu
csr *clusterstate.ClusterStateRegistry, currentTime time.Time, logRecorder *utils.LogEventRecorder) (bool, error) {

nodeGroups := a.nodeGroupsById()
nodesToBeDeletedByNodeGroupId := make(map[string][]clusterstate.UnregisteredNode)
nodesToDeleteByNodeGroupId := make(map[string][]clusterstate.UnregisteredNode)
for _, unregisteredNode := range allUnregisteredNodes {
nodeGroup, err := a.CloudProvider.NodeGroupForNode(unregisteredNode.Node)
if err != nil {
Expand All @@ -762,12 +762,12 @@ func (a *StaticAutoscaler) removeOldUnregisteredNodes(allUnregisteredNodes []clu

if unregisteredNode.UnregisteredSince.Add(maxNodeProvisionTime).Before(currentTime) {
klog.V(0).Infof("Marking unregistered node %v for removal", unregisteredNode.Node.Name)
nodesToBeDeletedByNodeGroupId[nodeGroup.Id()] = append(nodesToBeDeletedByNodeGroupId[nodeGroup.Id()], unregisteredNode)
nodesToDeleteByNodeGroupId[nodeGroup.Id()] = append(nodesToDeleteByNodeGroupId[nodeGroup.Id()], unregisteredNode)
}
}

removedAny := false
for nodeGroupId, unregisteredNodesToDelete := range nodesToBeDeletedByNodeGroupId {
for nodeGroupId, unregisteredNodesToDelete := range nodesToDeleteByNodeGroupId {
nodeGroup := nodeGroups[nodeGroupId]

klog.V(0).Infof("Removing %v unregistered nodes for node group %v", len(unregisteredNodesToDelete), nodeGroupId)
Expand All @@ -787,21 +787,11 @@ func (a *StaticAutoscaler) removeOldUnregisteredNodes(allUnregisteredNodes []clu
}
nodesToDelete := toNodes(unregisteredNodesToDelete)

opts, err := nodeGroup.GetOptions(a.NodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
klog.Warningf("Failed to get node group options for %s: %s", nodeGroupId, err)
nodesToDelete, err = overrideNodesToDeleteForZeroOrMax(a.NodeGroupDefaults, nodeGroup, nodesToDelete)
if err != nil {
klog.Warningf("Failed to remove unregistered nodes from node group %s: %v", nodeGroupId, err)
continue
}
// If a scale-up of "ZeroOrMaxNodeScaling" node group failed, the cleanup
// should stick to the all-or-nothing principle. Deleting all nodes.
if opts != nil && opts.ZeroOrMaxNodeScaling {
instances, err := nodeGroup.Nodes()
if err != nil {
klog.Warningf("Failed to fill in unregistered nodes from group %s based on ZeroOrMaxNodeScaling option: %s", nodeGroupId, err)
continue
}
nodesToDelete = instancesToFakeNodes(instances)
}

err = nodeGroup.DeleteNodes(nodesToDelete)
csr.InvalidateNodeInstancesCacheEntry(nodeGroup)
Expand Down Expand Up @@ -835,35 +825,19 @@ func (a *StaticAutoscaler) deleteCreatedNodesWithErrors() bool {
// We always schedule deleting of incoming erroneous nodes
// TODO[lukaszos] Consider adding logic to not retry delete every loop iteration
nodeGroups := a.nodeGroupsById()
nodesToBeDeletedByNodeGroupId := a.clusterStateRegistry.GetCreatedNodesWithErrors()
nodesToDeleteByNodeGroupId := a.clusterStateRegistry.GetCreatedNodesWithErrors()

deletedAny := false

for nodeGroupId, nodesToBeDeleted := range nodesToBeDeletedByNodeGroupId {
for nodeGroupId, nodesToDelete := range nodesToDeleteByNodeGroupId {
var err error
klog.V(1).Infof("Deleting %v from %v node group because of create errors", len(nodesToBeDeleted), nodeGroupId)
klog.V(1).Infof("Deleting %v from %v node group because of create errors", len(nodesToDelete), nodeGroupId)

nodeGroup := nodeGroups[nodeGroupId]
if nodeGroup == nil {
err = fmt.Errorf("node group %s not found", nodeGroupId)
} else {
var opts *config.NodeGroupAutoscalingOptions
opts, err = nodeGroup.GetOptions(a.NodeGroupDefaults)
if err != nil && err != cloudprovider.ErrNotImplemented {
klog.Warningf("Failed to get node group options for %s: %s", nodeGroupId, err)
continue
}
// If a scale-up of "ZeroOrMaxNodeScaling" node group failed, the cleanup
// should stick to the all-or-nothing principle. Deleting all nodes.
if opts != nil && opts.ZeroOrMaxNodeScaling {
instances, err := nodeGroup.Nodes()
if err != nil {
klog.Warningf("Failed to fill in failed nodes from group %s based on ZeroOrMaxNodeScaling option: %s", nodeGroupId, err)
continue
}
nodesToBeDeleted = instancesToFakeNodes(instances)
}
err = nodeGroup.DeleteNodes(nodesToBeDeleted)
} else if nodesToDelete, err = overrideNodesToDeleteForZeroOrMax(a.NodeGroupDefaults, nodeGroup, nodesToDelete); err == nil {
err = nodeGroup.DeleteNodes(nodesToDelete)
}

if err != nil {
Expand All @@ -877,6 +851,27 @@ func (a *StaticAutoscaler) deleteCreatedNodesWithErrors() bool {
return deletedAny
}

// overrideNodesToDeleteForZeroOrMax returns a list of nodes to delete, taking into account that
// node deletion for a "ZeroOrMaxNodeScaling" node group is atomic and should delete all nodes.
// For a non-"ZeroOrMaxNodeScaling" node group it returns the unchanged list of nodes to delete.
// overrideNodesToDeleteForZeroOrMax returns the list of nodes to delete, taking into account that
// node deletion for a "ZeroOrMaxNodeScaling" node group is atomic and must cover all of its nodes.
// For a non-"ZeroOrMaxNodeScaling" node group the input list is returned unchanged.
//
// The returned error wraps the underlying cloud-provider error (use errors.Is/errors.As to inspect it);
// on error the returned slice is nil.
func overrideNodesToDeleteForZeroOrMax(defaults config.NodeGroupAutoscalingOptions, nodeGroup cloudprovider.NodeGroup, nodesToDelete []*apiv1.Node) ([]*apiv1.Node, error) {
	opts, err := nodeGroup.GetOptions(defaults)
	// ErrNotImplemented simply means the provider has no per-group options; treat it as "no override".
	if err != nil && err != cloudprovider.ErrNotImplemented {
		return nil, fmt.Errorf("getting node group options for %s: %w", nodeGroup.Id(), err)
	}
	// If a scale-up of a "ZeroOrMaxNodeScaling" node group failed, the cleanup
	// should stick to the all-or-nothing principle: delete every node in the group.
	if opts != nil && opts.ZeroOrMaxNodeScaling {
		instances, err := nodeGroup.Nodes()
		if err != nil {
			return nil, fmt.Errorf("listing nodes to delete from group %s based on ZeroOrMaxNodeScaling option: %w", nodeGroup.Id(), err)
		}
		return instancesToFakeNodes(instances), nil
	}
	// No override needed.
	return nodesToDelete, nil
}

// instancesToNodes returns a list of fake nodes with just names populated,
// so that they can be passed as nodes to delete
func instancesToFakeNodes(instances []cloudprovider.Instance) []*apiv1.Node {
Expand Down
51 changes: 51 additions & 0 deletions cluster-autoscaler/core/static_autoscaler_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1910,6 +1910,57 @@ func TestStaticAutoscalerInstanceCreationErrors(t *testing.T) {
}
return names["D1"] && names["D2"] && names["D3"]
}))

// Node group with getOptions error gets no deletes.
nodeGroupError := &mockprovider.NodeGroup{}
nodeGroupError.On("Exist").Return(true)
nodeGroupError.On("Autoprovisioned").Return(false)
nodeGroupError.On("TargetSize").Return(1, nil)
nodeGroupError.On("Id").Return("E")
nodeGroupError.On("DeleteNodes", mock.Anything).Return(nil)
nodeGroupError.On("GetOptions", options.NodeGroupDefaults).Return(nil, fmt.Errorf("Failed to get options"))
nodeGroupError.On("Nodes").Return([]cloudprovider.Instance{
{
Id: "E1",
Status: &cloudprovider.InstanceStatus{
State: cloudprovider.InstanceRunning,
},
},
{

Id: "E2",
Status: &cloudprovider.InstanceStatus{
State: cloudprovider.InstanceCreating,
ErrorInfo: &cloudprovider.InstanceErrorInfo{
ErrorClass: cloudprovider.OutOfResourcesErrorClass,
ErrorCode: "QUOTA",
},
},
},
}, nil)

provider = &mockprovider.CloudProvider{}
provider.On("NodeGroups").Return([]cloudprovider.NodeGroup{nodeGroupError})
provider.On("NodeGroupForNode", mock.Anything).Return(
func(node *apiv1.Node) cloudprovider.NodeGroup {
if strings.HasPrefix(node.Spec.ProviderID, "E") {
return nodeGroupError
}
return nil
}, nil).Times(2)

clusterState = clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodeGroupConfigProcessor)
clusterState.RefreshCloudProviderNodeInstancesCache()
autoscaler.CloudProvider = provider
autoscaler.clusterStateRegistry = clusterState
// propagate nodes info in cluster state
clusterState.UpdateNodes([]*apiv1.Node{}, nil, now)

// delete nodes with create errors
removedNodes = autoscaler.deleteCreatedNodesWithErrors()
assert.False(t, removedNodes)

nodeGroupError.AssertNumberOfCalls(t, "DeleteNodes", 0)
}

type candidateTrackingFakePlanner struct {
Expand Down
Loading