diff --git a/pkg/descheduler/apis/config/types_loadaware.go b/pkg/descheduler/apis/config/types_loadaware.go index a55dab067..96f4f7b27 100644 --- a/pkg/descheduler/apis/config/types_loadaware.go +++ b/pkg/descheduler/apis/config/types_loadaware.go @@ -80,6 +80,9 @@ type LowNodeLoadArgs struct { // it is determined that the node is abnormal, and the Pods need to be migrated to reduce the load. AnomalyCondition *LoadAnomalyCondition + // DetectorCacheTimeout indicates the cache expiration time of nodeAnomalyDetectors, the default is 5 minutes + DetectorCacheTimeout *metav1.Duration + // NodePools supports multiple different types of batch nodes to configure different strategies NodePools []LowNodeLoadNodePool } diff --git a/pkg/descheduler/apis/config/v1alpha2/defaults.go b/pkg/descheduler/apis/config/v1alpha2/defaults.go index 526cb1d9c..5011a3d4c 100644 --- a/pkg/descheduler/apis/config/v1alpha2/defaults.go +++ b/pkg/descheduler/apis/config/v1alpha2/defaults.go @@ -45,6 +45,7 @@ const ( defaultMigrationEvictBurst = 1 defaultSchedulerSupportReservation = "koord-scheduler" defaultArbitrationInterval = 500 * time.Millisecond + defaultDetectorCacheTimeout = 5 * time.Minute ) var ( @@ -263,6 +264,9 @@ func SetDefaults_LowNodeLoadArgs(obj *LowNodeLoadArgs) { } else if obj.AnomalyCondition.ConsecutiveAbnormalities == 0 { obj.AnomalyCondition.ConsecutiveAbnormalities = defaultLoadAnomalyCondition.ConsecutiveAbnormalities } + if obj.DetectorCacheTimeout == nil { + obj.DetectorCacheTimeout = &metav1.Duration{Duration: defaultDetectorCacheTimeout} + } if obj.NodeMetricExpirationSeconds == nil { obj.NodeMetricExpirationSeconds = pointer.Int64(defaultNodeMetricExpirationSeconds) diff --git a/pkg/descheduler/apis/config/v1alpha2/defaults_test.go b/pkg/descheduler/apis/config/v1alpha2/defaults_test.go index 459827604..69bc302c0 100644 --- a/pkg/descheduler/apis/config/v1alpha2/defaults_test.go +++ b/pkg/descheduler/apis/config/v1alpha2/defaults_test.go @@ -41,6 +41,23 @@ func 
TestSetDefaults_LowNodeLoadArgs(t *testing.T) { NodeFit: pointer.Bool(false), NodeMetricExpirationSeconds: pointer.Int64(defaultNodeMetricExpirationSeconds), AnomalyCondition: defaultLoadAnomalyCondition, + DetectorCacheTimeout: &metav1.Duration{Duration: 5 * time.Minute}, + ResourceWeights: map[corev1.ResourceName]int64{ + corev1.ResourceCPU: 1, + corev1.ResourceMemory: 1, + }, + }, + }, + { + name: "set detectorCacheTimeout", + args: &LowNodeLoadArgs{ + DetectorCacheTimeout: &metav1.Duration{Duration: 10 * time.Minute}, + }, + expected: &LowNodeLoadArgs{ + NodeFit: pointer.Bool(true), + NodeMetricExpirationSeconds: pointer.Int64(defaultNodeMetricExpirationSeconds), + AnomalyCondition: defaultLoadAnomalyCondition, + DetectorCacheTimeout: &metav1.Duration{Duration: 10 * time.Minute}, ResourceWeights: map[corev1.ResourceName]int64{ corev1.ResourceCPU: 1, corev1.ResourceMemory: 1, @@ -64,6 +81,7 @@ func TestSetDefaults_LowNodeLoadArgs(t *testing.T) { ConsecutiveAbnormalities: defaultLoadAnomalyCondition.ConsecutiveAbnormalities, ConsecutiveNormalities: 3, }, + DetectorCacheTimeout: &metav1.Duration{Duration: 5 * time.Minute}, ResourceWeights: map[corev1.ResourceName]int64{ corev1.ResourceCPU: 1, corev1.ResourceMemory: 1, @@ -87,6 +105,7 @@ func TestSetDefaults_LowNodeLoadArgs(t *testing.T) { NodeFit: pointer.Bool(true), NodeMetricExpirationSeconds: pointer.Int64(defaultNodeMetricExpirationSeconds), AnomalyCondition: defaultLoadAnomalyCondition, + DetectorCacheTimeout: &metav1.Duration{Duration: 5 * time.Minute}, LowThresholds: ResourceThresholds{ corev1.ResourceCPU: 30, corev1.ResourceMemory: 30, diff --git a/pkg/descheduler/apis/config/v1alpha2/types_loadaware.go b/pkg/descheduler/apis/config/v1alpha2/types_loadaware.go index 6c8a05ad5..8dae93feb 100644 --- a/pkg/descheduler/apis/config/v1alpha2/types_loadaware.go +++ b/pkg/descheduler/apis/config/v1alpha2/types_loadaware.go @@ -79,6 +79,9 @@ type LowNodeLoadArgs struct { // it is determined that the node is 
abnormal, and the Pods need to be migrated to reduce the load. AnomalyCondition *LoadAnomalyCondition `json:"anomalyCondition,omitempty"` + // DetectorCacheTimeout indicates the cache expiration time of nodeAnomalyDetectors, the default is 5 minutes + DetectorCacheTimeout *metav1.Duration `json:"detectorCacheTimeout,omitempty"` + // NodePools supports multiple different types of batch nodes to configure different strategies NodePools []LowNodeLoadNodePool `json:"nodePools,omitempty"` } diff --git a/pkg/descheduler/apis/config/v1alpha2/zz_generated.conversion.go b/pkg/descheduler/apis/config/v1alpha2/zz_generated.conversion.go index 7b8297d07..a0de464bf 100644 --- a/pkg/descheduler/apis/config/v1alpha2/zz_generated.conversion.go +++ b/pkg/descheduler/apis/config/v1alpha2/zz_generated.conversion.go @@ -389,6 +389,7 @@ func autoConvert_v1alpha2_LowNodeLoadArgs_To_config_LowNodeLoadArgs(in *LowNodeL } else { out.AnomalyCondition = nil } + out.DetectorCacheTimeout = (*v1.Duration)(unsafe.Pointer(in.DetectorCacheTimeout)) if in.NodePools != nil { in, out := &in.NodePools, &out.NodePools *out = make([]config.LowNodeLoadNodePool, len(*in)) @@ -435,6 +436,7 @@ func autoConvert_config_LowNodeLoadArgs_To_v1alpha2_LowNodeLoadArgs(in *config.L } else { out.AnomalyCondition = nil } + out.DetectorCacheTimeout = (*v1.Duration)(unsafe.Pointer(in.DetectorCacheTimeout)) if in.NodePools != nil { in, out := &in.NodePools, &out.NodePools *out = make([]LowNodeLoadNodePool, len(*in)) diff --git a/pkg/descheduler/apis/config/v1alpha2/zz_generated.deepcopy.go b/pkg/descheduler/apis/config/v1alpha2/zz_generated.deepcopy.go index 58ce772be..b86d6043b 100644 --- a/pkg/descheduler/apis/config/v1alpha2/zz_generated.deepcopy.go +++ b/pkg/descheduler/apis/config/v1alpha2/zz_generated.deepcopy.go @@ -237,6 +237,11 @@ func (in *LowNodeLoadArgs) DeepCopyInto(out *LowNodeLoadArgs) { *out = new(LoadAnomalyCondition) (*in).DeepCopyInto(*out) } + if in.DetectorCacheTimeout != nil { + in, out :=
&in.DetectorCacheTimeout, &out.DetectorCacheTimeout + *out = new(v1.Duration) + **out = **in + } if in.NodePools != nil { in, out := &in.NodePools, &out.NodePools *out = make([]LowNodeLoadNodePool, len(*in)) diff --git a/pkg/descheduler/apis/config/zz_generated.deepcopy.go b/pkg/descheduler/apis/config/zz_generated.deepcopy.go index ba2cc7bc7..890d26a46 100644 --- a/pkg/descheduler/apis/config/zz_generated.deepcopy.go +++ b/pkg/descheduler/apis/config/zz_generated.deepcopy.go @@ -213,6 +213,11 @@ func (in *LowNodeLoadArgs) DeepCopyInto(out *LowNodeLoadArgs) { *out = new(LoadAnomalyCondition) **out = **in } + if in.DetectorCacheTimeout != nil { + in, out := &in.DetectorCacheTimeout, &out.DetectorCacheTimeout + *out = new(v1.Duration) + **out = **in + } if in.NodePools != nil { in, out := &in.NodePools, &out.NodePools *out = make([]LowNodeLoadNodePool, len(*in)) diff --git a/pkg/descheduler/framework/plugins/loadaware/low_node_load.go b/pkg/descheduler/framework/plugins/loadaware/low_node_load.go index a5190ae89..f45df6c46 100644 --- a/pkg/descheduler/framework/plugins/loadaware/low_node_load.go +++ b/pkg/descheduler/framework/plugins/loadaware/low_node_load.go @@ -21,7 +21,6 @@ import ( "fmt" "sort" "strings" - "time" gocache "github.com/patrickmn/go-cache" corev1 "k8s.io/api/core/v1" @@ -107,7 +106,7 @@ func NewLowNodeLoad(args runtime.Object, handle framework.Handle) (framework.Plu koordSharedInformerFactory.Start(context.TODO().Done()) koordSharedInformerFactory.WaitForCacheSync(context.TODO().Done()) - nodeAnomalyDetectors := gocache.New(5*time.Minute, 5*time.Minute) + nodeAnomalyDetectors := gocache.New(loadLoadUtilizationArgs.DetectorCacheTimeout.Duration, loadLoadUtilizationArgs.DetectorCacheTimeout.Duration) return &LowNodeLoad{ handle: handle, diff --git a/pkg/descheduler/framework/plugins/loadaware/low_node_load_test.go b/pkg/descheduler/framework/plugins/loadaware/low_node_load_test.go index 61898073b..14d48cf09 100644 --- 
a/pkg/descheduler/framework/plugins/loadaware/low_node_load_test.go +++ b/pkg/descheduler/framework/plugins/loadaware/low_node_load_test.go @@ -1030,7 +1030,8 @@ func TestLowNodeLoad(t *testing.T) { }, }, }, - EvictableNamespaces: tt.evictableNamespaces, + DetectorCacheTimeout: &metav1.Duration{Duration: 5 * time.Minute}, + EvictableNamespaces: tt.evictableNamespaces, }, }) }, diff --git a/pkg/descheduler/framework/plugins/loadaware/utilization_util.go b/pkg/descheduler/framework/plugins/loadaware/utilization_util.go index c72f6cdc5..436038b18 100644 --- a/pkg/descheduler/framework/plugins/loadaware/utilization_util.go +++ b/pkg/descheduler/framework/plugins/loadaware/utilization_util.go @@ -360,7 +360,7 @@ func evictPods( "nodePool", nodePoolName, } for k, v := range nodeInfo.usage { - keysAndValues = append(keysAndValues, k, v.String()) + keysAndValues = append(keysAndValues, k.String(), v.String()) } for resourceName, quantity := range totalAvailableUsages { keysAndValues = append(keysAndValues, fmt.Sprintf("%s/totalAvailable", resourceName), quantity.String())