From a842d4f1088f338d59dde94e460304028ff930e0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Daniel=20K=C5=82obuszewski?=
Date: Wed, 7 Feb 2024 12:01:05 +0100
Subject: [PATCH] Reduce log spam in AtomicResizeFilteringProcessor

Also, introduce default per-node logging quotas. For now, identical to
the per-pod ones.
---
 .../processors/nodes/scale_down_set_processor.go |  9 +++++++--
 cluster-autoscaler/utils/klogx/defaults.go       | 14 ++++++++++++++
 2 files changed, 21 insertions(+), 2 deletions(-)

diff --git a/cluster-autoscaler/processors/nodes/scale_down_set_processor.go b/cluster-autoscaler/processors/nodes/scale_down_set_processor.go
index b503e42a8ae1..9c86aadafa8a 100644
--- a/cluster-autoscaler/processors/nodes/scale_down_set_processor.go
+++ b/cluster-autoscaler/processors/nodes/scale_down_set_processor.go
@@ -20,6 +20,7 @@ import (
 	"k8s.io/autoscaler/cluster-autoscaler/cloudprovider"
 	"k8s.io/autoscaler/cluster-autoscaler/context"
 	"k8s.io/autoscaler/cluster-autoscaler/simulator"
+	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
 
 	klog "k8s.io/klog/v2"
 )
@@ -83,6 +84,8 @@ type AtomicResizeFilteringProcessor struct {
 
 // GetNodesToRemove selects up to maxCount nodes for deletion, by selecting a first maxCount candidates
 func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.AutoscalingContext, candidates []simulator.NodeToBeRemoved, maxCount int) []simulator.NodeToBeRemoved {
+	atomicQuota := klogx.NodesLoggingQuota()
+	standardQuota := klogx.NodesLoggingQuota()
 	nodesByGroup := map[cloudprovider.NodeGroup][]simulator.NodeToBeRemoved{}
 	result := []simulator.NodeToBeRemoved{}
 	for _, node := range candidates {
@@ -97,13 +100,15 @@ func (p *AtomicResizeFilteringProcessor) GetNodesToRemove(ctx *context.Autoscali
 			continue
 		}
 		if autoscalingOptions != nil && autoscalingOptions.ZeroOrMaxNodeScaling {
-			klog.V(2).Infof("Considering node %s for atomic scale down", node.Node.Name)
+			klogx.V(2).UpTo(atomicQuota).Infof("Considering node %s for atomic scale down", node.Node.Name)
 			nodesByGroup[nodeGroup] = append(nodesByGroup[nodeGroup], node)
 		} else {
-			klog.V(2).Infof("Considering node %s for standard scale down", node.Node.Name)
+			klogx.V(2).UpTo(standardQuota).Infof("Considering node %s for standard scale down", node.Node.Name)
 			result = append(result, node)
 		}
 	}
+	klogx.V(2).Over(atomicQuota).Infof("Considering %d other nodes for atomic scale down", -atomicQuota.Left())
+	klogx.V(2).Over(standardQuota).Infof("Considering %d other nodes for standard scale down", -standardQuota.Left())
 	for nodeGroup, nodes := range nodesByGroup {
 		ngSize, err := nodeGroup.TargetSize()
 		if err != nil {
diff --git a/cluster-autoscaler/utils/klogx/defaults.go b/cluster-autoscaler/utils/klogx/defaults.go
index 1b2dadd8a0da..29ba45844452 100644
--- a/cluster-autoscaler/utils/klogx/defaults.go
+++ b/cluster-autoscaler/utils/klogx/defaults.go
@@ -25,6 +25,12 @@ const (
 	// MaxPodsLoggedV5 is the maximum number of pods for which we will
 	// log detailed information every loop at verbosity >= 5.
 	MaxPodsLoggedV5 = 1000
+	// MaxNodesLogged is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity < 5.
+	MaxNodesLogged = 20
+	// MaxNodesLoggedV5 is the maximum number of nodes for which we will
+	// log detailed information every loop at verbosity >= 5.
+	MaxNodesLoggedV5 = 1000
 )
 
 // PodsLoggingQuota returns a new quota with default limit for pods at current verbosity.
@@ -34,3 +40,11 @@ func PodsLoggingQuota() *Quota {
 	}
 	return NewLoggingQuota(MaxPodsLogged)
 }
+
+// NodesLoggingQuota returns a new quota with default limit for nodes at current verbosity.
+func NodesLoggingQuota() *Quota {
+	if klog.V(5).Enabled() {
+		return NewLoggingQuota(MaxNodesLoggedV5)
+	}
+	return NewLoggingQuota(MaxNodesLogged)
+}
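
For reference, a minimal standalone sketch (not part of the patch) of the klogx quota
pattern the diff relies on: per-item messages go through UpTo(quota) while the quota has
capacity left, and a single Over(quota) line afterwards summarizes how many messages were
suppressed, using the negative Left() value. It only uses the calls visible in the diff
above (NewLoggingQuota, V, UpTo, Over, Infof, Left); the package layout, the "items"
slice, and the verbosity setup are illustrative assumptions, not code from the repository.

package main

import (
	"flag"

	"k8s.io/autoscaler/cluster-autoscaler/utils/klogx"
	klog "k8s.io/klog/v2"
)

func main() {
	// Enable verbosity 2 so the V(2) messages below are actually emitted.
	klog.InitFlags(nil)
	flag.Set("v", "2")
	flag.Parse()
	defer klog.Flush()

	// Allow detailed logging for at most 3 items per loop iteration.
	quota := klogx.NewLoggingQuota(3)

	items := []string{"node-1", "node-2", "node-3", "node-4", "node-5"}
	for _, item := range items {
		// Logged only while the quota still has capacity.
		klogx.V(2).UpTo(quota).Infof("Considering %s for scale down", item)
	}
	// Once the quota is exhausted, Left() goes negative by the number of
	// suppressed messages, so one summary line covers the rest.
	klogx.V(2).Over(quota).Infof("Considering %d other items for scale down", -quota.Left())
}

With five items and a quota of 3, this prints the first three "Considering ..." lines and
then a single "Considering 2 other items ..." summary, which is the same shape of output
the patch introduces for atomic and standard scale-down candidates.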