Skip to content

Commit

Permalink
scheduler: do not balance the empty regions (#3344)
Browse files Browse the repository at this point in the history
* do not balance the empty regions

Signed-off-by: Ryan Leung <rleungx@gmail.com>

* add a threshold for balance empty region

Signed-off-by: rleungx <rleungx@gmail.com>

* address the comment

Signed-off-by: Ryan Leung <rleungx@gmail.com>

* Update server/schedule/opt/healthy.go

Signed-off-by: Ryan Leung <rleungx@gmail.com>

Co-authored-by: Ti Chi Robot <71242396+ti-chi-bot@users.noreply.github.com>
  • Loading branch information
rleungx and ti-chi-bot authored Jan 20, 2021
1 parent 02c5f77 commit a6f9d92
Show file tree
Hide file tree
Showing 4 changed files with 63 additions and 6 deletions.
7 changes: 6 additions & 1 deletion pkg/mock/mockcluster/mockcluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,7 @@ package mockcluster

import (
"fmt"
"strconv"
"time"

"github.com/gogo/protobuf/proto"
Expand Down Expand Up @@ -585,7 +586,11 @@ func (mc *Cluster) CheckLabelProperty(typ string, labels []*metapb.StoreLabel) b

// PutRegionStores mocks method.
func (mc *Cluster) PutRegionStores(id uint64, stores ...uint64) {
meta := &metapb.Region{Id: id}
meta := &metapb.Region{
Id: id,
StartKey: []byte(strconv.FormatUint(id, 10)),
EndKey: []byte(strconv.FormatUint(id+1, 10)),
}
for _, s := range stores {
meta.Peers = append(meta.Peers, &metapb.Peer{StoreId: s})
}
Expand Down
17 changes: 16 additions & 1 deletion server/schedule/opt/healthy.go
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,12 @@

package opt

import "github.com/tikv/pd/server/core"
import (
"github.com/tikv/pd/server/core"
)

// balanceEmptyRegionThreshold allows an empty region to be balanced only while
// the cluster's total region count is below this value; once the cluster holds
// this many regions or more, empty regions are excluded from balancing
// (see IsEmptyRegionAllowBalance).
var balanceEmptyRegionThreshold = 50

// IsRegionHealthy checks if a region is healthy for scheduling. It requires the
// region does not have any down or pending peers. And when placement rules
Expand All @@ -31,6 +36,11 @@ func IsHealthyAllowPending(cluster Cluster, region *core.RegionInfo) bool {
return len(region.GetDownPeers()) == 0
}

// IsEmptyRegionAllowBalance checks if a region is an empty region and can be balanced.
func IsEmptyRegionAllowBalance(cluster Cluster, region *core.RegionInfo) bool {
	// A region holding real data is always eligible for balancing.
	if region.GetApproximateSize() > core.EmptyRegionApproximateSize {
		return true
	}
	// An (approximately) empty region may still be balanced while the cluster
	// has only a small number of regions overall.
	return cluster.GetRegionCount() < balanceEmptyRegionThreshold
}

// HealthRegion returns a function that checks if a region is healthy for
// scheduling. It requires the region does not have any down or pending peers,
// and does not have any learner peers when placement rules is disabled.
Expand All @@ -45,6 +55,11 @@ func HealthAllowPending(cluster Cluster) func(*core.RegionInfo) bool {
return func(region *core.RegionInfo) bool { return IsHealthyAllowPending(cluster, region) }
}

// AllowBalanceEmptyRegion returns a function that checks if a region is an empty region and can be balanced.
func AllowBalanceEmptyRegion(cluster Cluster) func(*core.RegionInfo) bool {
	return func(r *core.RegionInfo) bool {
		return IsEmptyRegionAllowBalance(cluster, r)
	}
}

// IsRegionReplicated checks if a region is fully replicated. When placement
// rules is enabled, its peers should fit corresponding rules. When placement
// rules is disabled, it should have enough replicas and no any learner peer.
Expand Down
8 changes: 4 additions & 4 deletions server/schedulers/balance_region.go
Original file line number Diff line number Diff line change
Expand Up @@ -152,18 +152,18 @@ func (s *balanceRegionScheduler) Schedule(cluster opt.Cluster) []*operator.Opera
for i := 0; i < balanceRegionRetryLimit; i++ {
// Priority pick the region that has a pending peer.
// Pending region may means the disk is overload, remove the pending region firstly.
region := cluster.RandPendingRegion(sourceID, s.conf.Ranges, opt.HealthAllowPending(cluster), opt.ReplicatedRegion(cluster))
region := cluster.RandPendingRegion(sourceID, s.conf.Ranges, opt.HealthAllowPending(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
if region == nil {
// Then pick the region that has a follower in the source store.
region = cluster.RandFollowerRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster))
region = cluster.RandFollowerRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
}
if region == nil {
// Then pick the region has the leader in the source store.
region = cluster.RandLeaderRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster))
region = cluster.RandLeaderRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
}
if region == nil {
// Finally pick learner.
region = cluster.RandLearnerRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster))
region = cluster.RandLearnerRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster), opt.AllowBalanceEmptyRegion(cluster))
}
if region == nil {
schedulerCounter.WithLabelValues(s.GetName(), "no-region").Inc()
Expand Down
37 changes: 37 additions & 0 deletions server/schedulers/balance_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -932,6 +932,43 @@ func (s *testBalanceRegionSchedulerSuite) TestShouldNotBalance(c *C) {
}
}

// TestEmptyRegion verifies that balance-region will still move an
// (approximately) empty region while the cluster has few regions, but stops
// picking it once the cluster's region count grows past the empty-region
// balance threshold.
func (s *testBalanceRegionSchedulerSuite) TestEmptyRegion(c *C) {
	testOpt := config.NewTestOptions()
	cluster := mockcluster.NewCluster(testOpt)
	cluster.DisableFeature(versioninfo.JointConsensus)
	controller := schedule.NewOperatorController(s.ctx, nil, nil)
	scheduler, err := schedule.CreateScheduler(BalanceRegionType, controller, core.NewStorage(kv.NewMemoryKV()), schedule.ConfigSliceDecoder(BalanceRegionType, []string{"", ""}))
	c.Assert(err, IsNil)

	// Four stores with a mild imbalance so the scheduler has a candidate move.
	cluster.AddRegionStore(1, 10)
	cluster.AddRegionStore(2, 9)
	cluster.AddRegionStore(3, 10)
	cluster.AddRegionStore(4, 10)

	// A tiny region (size/keys of 1) counts as empty for scheduling purposes.
	emptyRegion := core.NewRegionInfo(
		&metapb.Region{
			Id:       5,
			StartKey: []byte("a"),
			EndKey:   []byte("b"),
			Peers: []*metapb.Peer{
				{Id: 6, StoreId: 1},
				{Id: 7, StoreId: 3},
				{Id: 8, StoreId: 4},
			},
		},
		&metapb.Peer{Id: 7, StoreId: 3},
		core.SetApproximateSize(1),
		core.SetApproximateKeys(1),
	)
	cluster.PutRegion(emptyRegion)

	// With only a handful of regions, the empty region is still balanced.
	ops := scheduler.Schedule(cluster)
	c.Assert(ops, NotNil)

	// Grow the cluster beyond the threshold; the empty region must no longer
	// be selected, so the scheduler produces no operator.
	for id := uint64(10); id < 60; id++ {
		cluster.PutRegionStores(id, 1, 3, 4)
	}
	ops = scheduler.Schedule(cluster)
	c.Assert(ops, IsNil)
}

var _ = Suite(&testRandomMergeSchedulerSuite{})

type testRandomMergeSchedulerSuite struct{}
Expand Down

0 comments on commit a6f9d92

Please sign in to comment.