Fixes an issue where resource model grades were incorrectly matched based on resource requests. Now only grades that can provide sufficient resources will be selected.

Signed-off-by: RainbowMango <qdurenhongcai@gmail.com>
RainbowMango committed Oct 18, 2024
1 parent 29c97e9 commit 241a3a6
Showing 2 changed files with 255 additions and 23 deletions.
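In short, after this change a resource model grade is treated as able to host a replica only when the grade's minimum quantity is at least the requested quantity, so every node classified into that grade is guaranteed to provide enough of the resource; previously the grade whose range merely contained the request could be selected even though some of its nodes are too small. The snippet below is a minimal, self-contained sketch of that selection rule for orientation only; `lowestSufficientGrade` and the plain integer millicore values are simplified stand-ins, not Karmada's actual types or functions (the real code compares `resource.Quantity` values, as shown in the diff that follows).

```go
package main

import "fmt"

// lowestSufficientGrade returns the index of the lowest grade whose minimum
// quantity covers the request, or -1 when no grade can provide sufficient
// resources. Values are CPU millicores in this simplified illustration.
func lowestSufficientGrade(gradeMins []int64, request int64) int {
	for i, gradeMin := range gradeMins {
		if gradeMin >= request { // every node in this grade has at least gradeMin
			return i
		}
	}
	return -1
}

func main() {
	// Grades cpu [0,1C), [1C,2C), [2C,4C) -> minimums 0m, 1000m, 2000m.
	gradeMins := []int64{0, 1000, 2000}
	fmt.Println(lowestSufficientGrade(gradeMins, 1500)) // 2: only the [2C,4C) grade guarantees 1.5C
	fmt.Println(lowestSufficientGrade(gradeMins, 3000)) // -1: no grade minimum covers 3C
}
```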
48 changes: 25 additions & 23 deletions pkg/estimator/client/general.go
@@ -197,7 +197,7 @@ func getMaximumReplicasBasedOnClusterSummary(resourceSummary *clusterv1alpha1.Re
func getMaximumReplicasBasedOnResourceModels(cluster *clusterv1alpha1.Cluster, replicaRequirements *workv1alpha2.ReplicaRequirements) (int64, error) {
resourceModelsMinMap := convertToResourceModelsMinMap(cluster.Spec.ResourceModels)

- minCompliantModelIndex := 0
+ minCompliantModelIndex := -1
for key, value := range replicaRequirements.ResourceRequest {
requestedQuantity := value.Value()
if requestedQuantity <= 0 {
@@ -209,28 +209,15 @@ func getMaximumReplicasBasedOnResourceModels(cluster *clusterv1alpha1.Cluster, r
return -1, fmt.Errorf("resource model is inapplicable as missing resource: %s", string(key))
}

- for index, minValue := range quantityArray {
- // Suppose there is the following resource model:
- // Model1: cpu [1C,2C)
- // Model2: cpu [2C,3C)
- // if pod cpu request is 1.5C, we regard the nodes in model1 as meeting the requirements of the Pod.
- // Suppose there is the following resource model:
- // Model1: cpu [1C,2C), memory [1Gi,2Gi)
- // Model2: cpu [2C,3C), memory [2Gi,3Gi)
- // if pod cpu request is 1.5C and memory request is 2.5Gi
- // We regard the node of model1 as not meeting the requirements, and the nodes of model2 and later as meeting the requirements.
- if minValue.Cmp(value) > 0 {
- // Since the 'min' value of the first model is always 0, hit here
- // the index should be >=1, so it's safe to use 'index-1' here.
- if index-1 > minCompliantModelIndex {
- minCompliantModelIndex = index - 1
- }
- break
- }
-
- if index == len(quantityArray)-1 {
- minCompliantModelIndex = index
- }
+ // Find the minimum compliant model grade for each requested resource. If no
+ // suitable grade is found, it means no grade can provide sufficient resources,
+ // so return immediately.
+ minCompliantModelIndexForResource := minimumModelIndex(quantityArray, value)
+ if minCompliantModelIndexForResource == -1 {
+ return 0, nil
+ }
+ if minCompliantModelIndex <= minCompliantModelIndexForResource {
+ minCompliantModelIndex = minCompliantModelIndexForResource
}
}

@@ -244,3 +231,18 @@ func getMaximumReplicasBasedOnResourceModels(cluster *clusterv1alpha1.Cluster, r

return maximumReplicasForResource, nil
}

+ func minimumModelIndex(minimumGrades []resource.Quantity, requestValue resource.Quantity) int {
+ for index, minValue := range minimumGrades {
+ // Suppose there is the following resource model:
+ // Grade1: cpu [1C,2C)
+ // Grade2: cpu [2C,3C)
+ // If a Pod requests 1.5C of CPU, grade1 may not be able to provide sufficient resources,
+ // so we will choose grade2.
+ if minValue.Cmp(requestValue) >= 0 {
+ return index
+ }
+ }
+
+ return -1
+ }
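Between the two files, it may help to note how the per-resource results are combined: getMaximumReplicasBasedOnResourceModels keeps the largest per-resource index, so the overall grade must satisfy every requested resource, and a single resource with no compliant grade causes an immediate return of zero replicas. The sketch below illustrates only that combination step; `overallGrade` and the map of plain indices are hypothetical simplifications, not the actual Karmada code.

```go
package main

import "fmt"

// overallGrade combines per-resource compliant grade indices by taking the
// maximum, and returns -1 when any resource has no compliant grade at all.
// Simplified stand-in for the combining loop in getMaximumReplicasBasedOnResourceModels.
func overallGrade(perResource map[string]int) int {
	result := -1
	for _, idx := range perResource {
		if idx == -1 {
			return -1 // one resource cannot be satisfied by any grade
		}
		if idx > result {
			result = idx
		}
	}
	return result
}

func main() {
	// Mirrors the "Multi resource request" test case in general_test.go:
	// CPU 1C is satisfied from grade 1, memory 1.5Gi only from grade 2.
	fmt.Println(overallGrade(map[string]int{"cpu": 1, "memory": 2})) // 2
}
```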
230 changes: 230 additions & 0 deletions pkg/estimator/client/general_test.go
@@ -0,0 +1,230 @@
/*
Copyright 2024 The Karmada Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package client

import (
"testing"

corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"

clusterv1alpha1 "github.com/karmada-io/karmada/pkg/apis/cluster/v1alpha1"
workv1alpha2 "github.com/karmada-io/karmada/pkg/apis/work/v1alpha2"
)

func TestGetMaximumReplicasBasedOnResourceModels(t *testing.T) {
tests := []struct {
name string
cluster clusterv1alpha1.Cluster
replicaRequirements workv1alpha2.ReplicaRequirements
expectError bool
expectedReplicas int64
}{
{
name: "No grade defined should result in an error",
cluster: clusterv1alpha1.Cluster{},
replicaRequirements: workv1alpha2.ReplicaRequirements{
ResourceRequest: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
},
},
expectError: true,
expectedReplicas: -1,
},
{
name: "Partially compliant grades",
cluster: clusterv1alpha1.Cluster{
Spec: clusterv1alpha1.ClusterSpec{
ResourceModels: []clusterv1alpha1.ResourceModel{
{
Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")}},
},
{
Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")}},
},
{
Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")}},
},
},
},
Status: clusterv1alpha1.ClusterStatus{
ResourceSummary: &clusterv1alpha1.ResourceSummary{
AllocatableModelings: []clusterv1alpha1.AllocatableModeling{
{Grade: 0, Count: 1},
{Grade: 1, Count: 1},
{Grade: 2, Count: 1},
},
},
},
},
replicaRequirements: workv1alpha2.ReplicaRequirements{
ResourceRequest: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1.5"),
},
},
expectError: false,
expectedReplicas: 1,
},
{
name: "No compliant grades",
cluster: clusterv1alpha1.Cluster{
Spec: clusterv1alpha1.ClusterSpec{
ResourceModels: []clusterv1alpha1.ResourceModel{
{
Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")},
},
},
{
Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")},
},
},
{
Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")},
},
},
},
},
Status: clusterv1alpha1.ClusterStatus{
ResourceSummary: &clusterv1alpha1.ResourceSummary{
AllocatableModelings: []clusterv1alpha1.AllocatableModeling{
{Grade: 0, Count: 1},
{Grade: 1, Count: 1},
{Grade: 2, Count: 1},
},
},
},
},
replicaRequirements: workv1alpha2.ReplicaRequirements{
ResourceRequest: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("3"),
},
},
expectError: false,
expectedReplicas: 0,
},
{
name: "Multi resource request",
cluster: clusterv1alpha1.Cluster{
Spec: clusterv1alpha1.ClusterSpec{
ResourceModels: []clusterv1alpha1.ResourceModel{
{
Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")},
{Name: corev1.ResourceMemory, Min: resource.MustParse("0"), Max: resource.MustParse("1Gi")},
},
},
{
Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")},
{Name: corev1.ResourceMemory, Min: resource.MustParse("1Gi"), Max: resource.MustParse("2Gi")},
},
},
{
Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")},
{Name: corev1.ResourceMemory, Min: resource.MustParse("2Gi"), Max: resource.MustParse("4Gi")},
},
},
},
},
Status: clusterv1alpha1.ClusterStatus{
ResourceSummary: &clusterv1alpha1.ResourceSummary{
AllocatableModelings: []clusterv1alpha1.AllocatableModeling{
{Grade: 0, Count: 1},
{Grade: 1, Count: 1},
{Grade: 2, Count: 1},
},
},
},
},
replicaRequirements: workv1alpha2.ReplicaRequirements{
ResourceRequest: corev1.ResourceList{
// When looking at CPU, grade 1 is sufficient; when looking at memory, grade 2 is required.
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("1.5Gi"),
},
},
expectError: false,
expectedReplicas: 1,
},
{
name: "request exceeds highest grade",
cluster: clusterv1alpha1.Cluster{
Spec: clusterv1alpha1.ClusterSpec{
ResourceModels: []clusterv1alpha1.ResourceModel{
{
Grade: 0, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("0"), Max: resource.MustParse("1")},
{Name: corev1.ResourceMemory, Min: resource.MustParse("0"), Max: resource.MustParse("1Gi")},
},
},
{
Grade: 1, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("1"), Max: resource.MustParse("2")},
{Name: corev1.ResourceMemory, Min: resource.MustParse("1Gi"), Max: resource.MustParse("2Gi")},
},
},
{
Grade: 2, Ranges: []clusterv1alpha1.ResourceModelRange{
{Name: corev1.ResourceCPU, Min: resource.MustParse("2"), Max: resource.MustParse("4")},
{Name: corev1.ResourceMemory, Min: resource.MustParse("2Gi"), Max: resource.MustParse("4Gi")},
},
},
},
},
Status: clusterv1alpha1.ClusterStatus{
ResourceSummary: &clusterv1alpha1.ResourceSummary{
AllocatableModelings: []clusterv1alpha1.AllocatableModeling{
{Grade: 0, Count: 1},
{Grade: 1, Count: 1},
{Grade: 2, Count: 1},
},
},
},
},
replicaRequirements: workv1alpha2.ReplicaRequirements{
ResourceRequest: corev1.ResourceList{
corev1.ResourceCPU: resource.MustParse("1"),
corev1.ResourceMemory: resource.MustParse("2.5Gi"), // no grade can provide sufficient memory.
},
},
expectError: false,
expectedReplicas: 0,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
replicas, err := getMaximumReplicasBasedOnResourceModels(&tt.cluster, &tt.replicaRequirements)
if tt.expectError && err == nil {
t.Errorf("Expects an error but got none")
}
if !tt.expectError && err != nil {
t.Errorf("getMaximumReplicasBasedOnResourceModels() returned an unexpected error: %v", err)
}
if replicas != tt.expectedReplicas {
t.Errorf("getMaximumReplicasBasedOnResourceModels() = %v, expectedReplicas %v", replicas, tt.expectedReplicas)
}
})
}
}
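These cases can be exercised locally with the standard Go tooling, for example `go test ./pkg/estimator/client/ -run TestGetMaximumReplicasBasedOnResourceModels`.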
