fix spreadconstraints[i].MaxGroups invalidation when scaling up replicas
Signed-off-by: huone1 <huwanxing@huawei.com>
huone1 committed Feb 8, 2022
1 parent e0e7a60 commit 72a91ba
Showing 3 changed files with 21 additions and 17 deletions.
pkg/scheduler/core/division_algorithm.go (20 changes: 11 additions, 9 deletions)
@@ -48,7 +48,7 @@ func divideReplicasByResource(
} else if assignedReplicas < spec.Replicas {
// We need to enlarge the replicas in terms of the previous result (if exists).
// First scheduling is considered as a special kind of scaling up.
- newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference, assignedReplicas)
+ newTargetClusters, err := scaleUpScheduleByReplicaDivisionPreference(clusters, spec, preference)
if err != nil {
return nil, fmt.Errorf("failed to scaleUp: %v", err)
}
@@ -208,31 +208,33 @@ func scaleUpScheduleByReplicaDivisionPreference(
clusters []*clusterv1alpha1.Cluster,
spec *workv1alpha2.ResourceBindingSpec,
preference policyv1alpha1.ReplicaDivisionPreference,
- assignedReplicas int32,
) ([]workv1alpha2.TargetCluster, error) {
// Step 1: Find the clusters that have old replicas, so we can prefer to assign new replicas towards them.
- scheduledClusterNames := findOutScheduledCluster(spec.Clusters, clusters)
+ scheduledClusters := findOutScheduledCluster(spec.Clusters, clusters)

- // Step 2: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
+ // Step 2: calculate the assigned Replicas in scheduledClusters
+ assignedReplicas := util.GetSumOfReplicas(scheduledClusters)
+ // Step 3: Get how many replicas should be scheduled in this cycle and construct a new object if necessary
newSpec := spec
if assignedReplicas > 0 {
newSpec = spec.DeepCopy()
newSpec.Replicas = spec.Replicas - assignedReplicas
}

- // Step 3: Calculate available replicas of all candidates
+ // Step 4: Calculate available replicas of all candidates
clusterAvailableReplicas := calAvailableReplicas(clusters, newSpec)

- // Step 4: Begin dividing.
+ // Step 5: Begin dividing.
// Only the new replicas are considered during this scheduler, the old replicas will not be moved.
// If not, the old replicas may be recreated which is not expected during scaling up.
// The parameter `scheduledClusterNames` is used to make sure that we assign new replicas to them preferentially
// so that all the replicas are aggregated.
- result, err := divideReplicasByPreference(clusterAvailableReplicas, newSpec.Replicas, preference, scheduledClusterNames)
+ result, err := divideReplicasByPreference(clusterAvailableReplicas, newSpec.Replicas,
+ 	preference, util.ConvertToClusterNames(scheduledClusters))
if err != nil {
return result, err
}

- // Step 5: Merge the result of previous and new results.
- return util.MergeTargetClusters(spec.Clusters, result), nil
+ // Step 6: Merge the result of previous and new results.
+ return util.MergeTargetClusters(scheduledClusters, result), nil
}
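
For context, the arithmetic behind Steps 2-3 can be illustrated with a short standalone sketch. The types, helper, cluster names, and replica numbers below are simplified stand-ins chosen for illustration, not the Karmada API: after this change, assignedReplicas counts only replicas that still sit on candidate clusters, so replicas stranded on clusters that were filtered out (for example by spread constraints) no longer shrink the amount scheduled in this cycle.

package main

import "fmt"

type TargetCluster struct {
	Name     string
	Replicas int32
}

// getSumOfReplicas is a simplified stand-in for util.GetSumOfReplicas used in Step 2.
func getSumOfReplicas(tcs []TargetCluster) int32 {
	var sum int32
	for _, tc := range tcs {
		sum += tc.Replicas
	}
	return sum
}

func main() {
	specReplicas := int32(12) // desired replicas in the binding spec (made-up number)

	// Previous scheduling result: 7 replicas in total, but member2 is assumed to have
	// been filtered out of the current candidate list.
	previous := []TargetCluster{{Name: "member1", Replicas: 4}, {Name: "member2", Replicas: 3}}

	// What Step 1 (findOutScheduledCluster) would keep: only clusters that are still candidates.
	scheduledClusters := []TargetCluster{{Name: "member1", Replicas: 4}}

	// Step 2 in the new code: count only replicas that still sit on candidate clusters.
	assignedReplicas := getSumOfReplicas(scheduledClusters)

	// Step 3: only the remainder is divided in this scheduling cycle.
	toDivide := specReplicas - assignedReplicas

	fmt.Printf("previous total=%d, assigned on candidates=%d, to divide now=%d\n",
		getSumOfReplicas(previous), assignedReplicas, toDivide) // 7, 4, 8
}

The final merge in Step 6 then starts from scheduledClusters rather than spec.Clusters, so clusters that are no longer candidates also drop out of the merged result.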
pkg/scheduler/core/division_algorithm_test.go (6 changes: 3 additions, 3 deletions)
@@ -818,9 +818,9 @@ func Test_scaleScheduling(t *testing.T) {
preference: policyv1alpha1.ReplicaDivisionPreferenceAggregated,
},
want: []workv1alpha2.TargetCluster{
- {Name: ClusterMember1, Replicas: 7},
- {Name: ClusterMember2, Replicas: 8},
- {Name: ClusterMember4, Replicas: 9},
+ {Name: ClusterMember1, Replicas: 8},
+ {Name: ClusterMember3, Replicas: 6},
+ {Name: ClusterMember4, Replicas: 10},
},
wantErr: false,
},
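One note on the updated expectation: both the old and the new want lists total 24 replicas (7+8+9 and 8+6+10), so the change affects which members receive the scaled-up replicas rather than how many are scheduled overall. A trivial standalone check of the totals:

package main

import "fmt"

func sum(xs ...int32) (s int32) {
	for _, x := range xs {
		s += x
	}
	return
}

func main() {
	fmt.Println(sum(7, 8, 9), sum(8, 6, 10)) // 24 24
}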
pkg/scheduler/core/util.go (12 changes: 7 additions, 5 deletions)
@@ -77,11 +77,12 @@ func calAvailableReplicas(clusters []*clusterv1alpha1.Cluster, spec *workv1alpha

// findOutScheduledCluster will return a name set of clusters
// which are a part of `feasibleClusters` and have non-zero replicas.
- func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) sets.String {
- res := sets.NewString()
+ func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clusterv1alpha1.Cluster) []workv1alpha2.TargetCluster {
+ validTarget := make([]workv1alpha2.TargetCluster, 0)
if len(tcs) == 0 {
- return res
+ return validTarget
}

for _, targetCluster := range tcs {
// must have non-zero replicas
if targetCluster.Replicas <= 0 {
@@ -90,12 +91,13 @@ func findOutScheduledCluster(tcs []workv1alpha2.TargetCluster, candidates []*clu
// must in `candidates`
for _, cluster := range candidates {
if targetCluster.Name == cluster.Name {
- res.Insert(targetCluster.Name)
+ validTarget = append(validTarget, targetCluster)
break
}
}
}
- return res
+
+ return validTarget
}

// resortClusterList is used to make sure scheduledClusterNames are in front of the other clusters in the list of
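To make the return-type change above concrete, here is a standalone illustration of the new findOutScheduledCluster behaviour using simplified local types (plain strings stand in for *clusterv1alpha1.Cluster, and the member names and numbers are made up): the function now returns the previously scheduled targets, with their replica counts, that are still among the candidates, instead of just a set of names.

package main

import "fmt"

type TargetCluster struct {
	Name     string
	Replicas int32
}

// findOutScheduledCluster mirrors the logic of the updated function above,
// keeping only previously scheduled targets that still appear in candidates
// and that carry a non-zero replica count.
func findOutScheduledCluster(tcs []TargetCluster, candidates []string) []TargetCluster {
	validTarget := make([]TargetCluster, 0)
	if len(tcs) == 0 {
		return validTarget
	}
	for _, targetCluster := range tcs {
		// must have non-zero replicas
		if targetCluster.Replicas <= 0 {
			continue
		}
		// must be in `candidates`
		for _, name := range candidates {
			if targetCluster.Name == name {
				validTarget = append(validTarget, targetCluster)
				break
			}
		}
	}
	return validTarget
}

func main() {
	previous := []TargetCluster{{"member1", 4}, {"member2", 3}, {"member3", 0}}
	candidates := []string{"member1", "member3", "member4"}
	fmt.Println(findOutScheduledCluster(previous, candidates))
	// [{member1 4}]: member2 is no longer a candidate, member3 has zero replicas.
}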
