Skip to content

Commit

Permalink
Reduce log spam in AtomicResizeFilteringProcessor
Browse files Browse the repository at this point in the history
Also, introduce default per-node logging quotas. For now, identical to
the per-pod ones.
  • Loading branch information
x13n committed Feb 7, 2024
1 parent 3802594 commit a842d4f
Show file tree
Hide file tree
Showing 2 changed files with 21 additions and 2 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
"k8s.io/autoscaler/cluster-autoscaler/context"
"k8s.io/autoscaler/cluster-autoscaler/simulator"
"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
klog "k8s.io/klog/v2"
)

Expand Down Expand Up @@ -83,6 +84,8 @@ type AtomicResizeFilteringProcessor struct {

// GetNodesToRemove selects up to maxCount nodes for deletion, by selecting a first maxCount candidates
func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.AutoscalingContext, candidates []simulator.NodeToBeRemoved, maxCount int) []simulator.NodeToBeRemoved {
atomicQuota := klogx.NodesLoggingQuota()
standardQuota := klogx.NodesLoggingQuota()
nodesByGroup := map[cloudprovider.NodeGroup][]simulator.NodeToBeRemoved{}
result := []simulator.NodeToBeRemoved{}
for _, node := range candidates {
Expand All @@ -97,13 +100,15 @@ func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.Autoscali
continue
}
if autoscalingOptions != nil && autoscalingOptions.ZeroOrMaxNodeScaling {
klog.V(2).Infof("Considering node %s for atomic scale down", node.Node.Name)
klogx.V(2).UpTo(atomicQuota).Infof("Considering node %s for atomic scale down", node.Node.Name)
nodesByGroup[nodeGroup] = append(nodesByGroup[nodeGroup], node)
} else {
klog.V(2).Infof("Considering node %s for standard scale down", node.Node.Name)
klogx.V(2).UpTo(standardQuota).Infof("Considering node %s for standard scale down", node.Node.Name)
result = append(result, node)
}
}
klogx.V(2).Over(atomicQuota).Infof("Considering %d other nodes for atomic scale down", -atomicQuota.Left())
klogx.V(2).Over(standardQuota).Infof("Considering %d other nodes for standard scale down", -standardQuota.Left())
for nodeGroup, nodes := range nodesByGroup {
ngSize, err := nodeGroup.TargetSize()
if err != nil {
Expand Down
14 changes: 14 additions & 0 deletions cluster-autoscaler/utils/klogx/defaults.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,12 @@ const (
// MaxPodsLoggedV5 is the maximum number of pods for which we will
// log detailed information every loop at verbosity >= 5.
MaxPodsLoggedV5 = 1000
// MaxNodesLogged is the maximum number of nodes for which we will
// log detailed information every loop at verbosity < 5.
MaxNodesLogged = 20
// MaxNodesLoggedV5 is the maximum number of nodes for which we will
// log detailed information every loop at verbosity >= 5.
MaxNodesLoggedV5 = 1000
)

// PodsLoggingQuota returns a new quota with default limit for pods at current verbosity.
Expand All @@ -34,3 +40,11 @@ func PodsLoggingQuota() *Quota {
}
return NewLoggingQuota(MaxPodsLogged)
}

// NodesLoggingQuota returns a new quota with default limit for nodes at current verbosity.
func NodesLoggingQuota() *Quota {
	// Allow a much larger number of per-node log lines when running at
	// high verbosity; otherwise keep the default cap to reduce log spam.
	limit := MaxNodesLogged
	if klog.V(5).Enabled() {
		limit = MaxNodesLoggedV5
	}
	return NewLoggingQuota(limit)
}

0 comments on commit a842d4f

Please sign in to comment.