diff --git a/pkg/mock/mockcluster/mockcluster.go b/pkg/mock/mockcluster/mockcluster.go index 86169913ba5..3ccfb643840 100644 --- a/pkg/mock/mockcluster/mockcluster.go +++ b/pkg/mock/mockcluster/mockcluster.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/pd/pkg/mock/mockid" "github.com/pingcap/pd/pkg/mock/mockoption" "github.com/pingcap/pd/server/core" + "github.com/pingcap/pd/server/kv" "github.com/pingcap/pd/server/schedule/placement" "github.com/pingcap/pd/server/statistics" "go.uber.org/zap" @@ -42,10 +43,12 @@ type Cluster struct { // NewCluster creates a new Cluster func NewCluster(opt *mockoption.ScheduleOptions) *Cluster { + ruleManager, _ := placement.NewRuleManager(core.NewStorage(kv.NewMemoryKV()), opt.MaxReplicas, opt.GetLocationLabels()) return &Cluster{ BasicCluster: core.NewBasicCluster(), IDAllocator: mockid.NewIDAllocator(), ScheduleOptions: opt, + RuleManager: ruleManager, HotCache: statistics.NewHotCache(), StoresStats: statistics.NewStoresStats(), } diff --git a/server/cluster_test.go b/server/cluster_test.go index 991d6e8bdda..151af7a4f3d 100644 --- a/server/cluster_test.go +++ b/server/cluster_test.go @@ -29,6 +29,7 @@ import ( "github.com/pingcap/pd/pkg/testutil" "github.com/pingcap/pd/server/core" "github.com/pingcap/pd/server/kv" + "github.com/pingcap/pd/server/schedule/opt" "github.com/pkg/errors" ) @@ -840,10 +841,10 @@ func (s *testRegionsInfoSuite) Test(c *C) { } for i := uint64(0); i < n; i++ { - region := cache.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}) + region := cluster.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}) c.Assert(region.GetLeader().GetStoreId(), Equals, i) - region = cache.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}) + region = cluster.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}) c.Assert(region.GetLeader().GetStoreId(), Not(Equals), i) c.Assert(region.GetStorePeer(i), NotNil) @@ -859,14 +860,14 @@ func (s *testRegionsInfoSuite) Test(c *C) { // All regions will be filtered out if they have pending peers. for i := uint64(0); i < n; i++ { for j := 0; j < cache.GetStoreLeaderCount(i); j++ { - region := cluster.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, core.HealthRegion()) + region := cluster.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, opt.HealthRegion(cluster)) newRegion := region.Clone(core.WithPendingPeers(region.GetPeers())) cache.SetRegion(newRegion) } - c.Assert(cluster.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, core.HealthRegion()), IsNil) + c.Assert(cluster.RandLeaderRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, opt.HealthRegion(cluster)), IsNil) } for i := uint64(0); i < n; i++ { - c.Assert(cluster.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, core.HealthRegion()), IsNil) + c.Assert(cluster.RandFollowerRegion(i, []core.KeyRange{core.NewKeyRange("", "")}, opt.HealthRegion(cluster)), IsNil) } } diff --git a/server/core/region_option.go b/server/core/region_option.go index 57ba8d5cefc..f0d1dd30c56 100644 --- a/server/core/region_option.go +++ b/server/core/region_option.go @@ -21,20 +21,6 @@ import ( // RegionOption is used to select region. type RegionOption func(region *RegionInfo) bool -// HealthRegion checks if the region is healthy. 
-func HealthRegion() RegionOption {
-	return func(region *RegionInfo) bool {
-		return len(region.downPeers) == 0 && len(region.pendingPeers) == 0 && len(region.learners) == 0
-	}
-}
-
-// HealthRegionAllowPending checks if the region is healthy with allowing the pending peer.
-func HealthRegionAllowPending() RegionOption {
-	return func(region *RegionInfo) bool {
-		return len(region.downPeers) == 0 && len(region.learners) == 0
-	}
-}
-
 // RegionCreateOption used to create region.
 type RegionCreateOption func(region *RegionInfo)
 
@@ -55,7 +41,15 @@ func WithPendingPeers(pengdingPeers []*metapb.Peer) RegionCreateOption {
 
 // WithLearners sets the learners for the region.
 func WithLearners(learners []*metapb.Peer) RegionCreateOption {
 	return func(region *RegionInfo) {
-		region.learners = learners
+		peers := region.meta.GetPeers()
+		for i := range peers {
+			for _, l := range learners {
+				if peers[i].GetId() == l.GetId() {
+					peers[i] = &metapb.Peer{Id: l.GetId(), StoreId: l.GetStoreId(), IsLearner: true}
+					break
+				}
+			}
+		}
 	}
 }
 
diff --git a/server/handler.go b/server/handler.go
index a16d63b4597..cada063e48e 100644
--- a/server/handler.go
+++ b/server/handler.go
@@ -30,6 +30,7 @@ import (
 	"github.com/pingcap/pd/server/core"
 	"github.com/pingcap/pd/server/schedule"
 	"github.com/pingcap/pd/server/schedule/operator"
+	"github.com/pingcap/pd/server/schedule/opt"
 	"github.com/pingcap/pd/server/statistics"
 	"github.com/pkg/errors"
 	"go.uber.org/zap"
@@ -612,13 +613,11 @@ func (h *Handler) AddMergeRegionOperator(regionID uint64, targetID uint64) error
 		return ErrRegionNotFound(targetID)
 	}
 
-	if len(region.GetDownPeers()) > 0 || len(region.GetPendingPeers()) > 0 || len(region.GetLearners()) > 0 ||
-		len(region.GetPeers()) != c.cluster.GetMaxReplicas() {
+	if !opt.IsRegionHealthy(c.cluster, region) || !opt.IsRegionReplicated(c.cluster, region) {
 		return ErrRegionAbnormalPeer(regionID)
 	}
 
-	if len(target.GetDownPeers()) > 0 || len(target.GetPendingPeers()) > 0 || len(target.GetLearners()) > 0 ||
-		len(target.GetMeta().GetPeers()) != c.cluster.GetMaxReplicas() {
+	if !opt.IsRegionHealthy(c.cluster, target) || !opt.IsRegionReplicated(c.cluster, target) {
 		return ErrRegionAbnormalPeer(targetID)
 	}
 
diff --git a/server/schedule/checker/merge_checker.go b/server/schedule/checker/merge_checker.go
index 1e37b8530b6..c11314b6e04 100644
--- a/server/schedule/checker/merge_checker.go
+++ b/server/schedule/checker/merge_checker.go
@@ -83,12 +83,12 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator {
 	}
 
 	// skip region has down peers or pending peers or learner peers
-	if len(region.GetDownPeers()) > 0 || len(region.GetPendingPeers()) > 0 || len(region.GetLearners()) > 0 {
+	if !opt.IsRegionHealthy(m.cluster, region) {
 		checkerCounter.WithLabelValues("merge_checker", "special-peer").Inc()
 		return nil
 	}
 
-	if len(region.GetPeers()) != m.cluster.GetMaxReplicas() {
+	if !opt.IsRegionReplicated(m.cluster, region) {
 		checkerCounter.WithLabelValues("merge_checker", "abnormal-replica").Inc()
 		return nil
 	}
@@ -132,8 +132,7 @@ func (m *MergeChecker) Check(region *core.RegionInfo) []*operator.Operator {
 
 func (m *MergeChecker) checkTarget(region, adjacent *core.RegionInfo) bool {
 	return adjacent != nil && !m.cluster.IsRegionHot(adjacent) && m.allowMerge(region, adjacent) &&
-		len(adjacent.GetDownPeers()) == 0 && len(adjacent.GetPendingPeers()) == 0 && len(adjacent.GetLearners()) == 0 && // no special peer
-		len(adjacent.GetPeers()) == m.cluster.GetMaxReplicas() // peer count should equal
+		opt.IsRegionHealthy(m.cluster, adjacent) && opt.IsRegionReplicated(m.cluster, adjacent)
 }
 
 // allowMerge returns true if two regions can be merged according to the key type.
diff --git a/server/schedule/opt/healthy.go b/server/schedule/opt/healthy.go
new file mode 100644
index 00000000000..6fbe2dc5ddf
--- /dev/null
+++ b/server/schedule/opt/healthy.go
@@ -0,0 +1,61 @@
+// Copyright 2019 PingCAP, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package opt
+
+import "github.com/pingcap/pd/server/core"
+
+// IsRegionHealthy checks if a region is healthy for scheduling. It requires
+// that the region has no down peers and no pending peers, and, when the
+// placement rules feature is disabled, no learner peers.
+func IsRegionHealthy(cluster Cluster, region *core.RegionInfo) bool {
+	return IsHealthyAllowPending(cluster, region) && len(region.GetPendingPeers()) == 0
+}
+
+// IsHealthyAllowPending checks if a region is healthy for scheduling.
+// Unlike IsRegionHealthy, it allows the region to have pending peers.
+func IsHealthyAllowPending(cluster Cluster, region *core.RegionInfo) bool {
+	if !cluster.IsPlacementRulesEnabled() && len(region.GetLearners()) > 0 {
+		return false
+	}
+	return len(region.GetDownPeers()) == 0
+}
+
+// HealthRegion returns a function that checks if a region is healthy for
+// scheduling. It requires that the region has no down or pending peers,
+// and no learner peers when placement rules are disabled.
+func HealthRegion(cluster Cluster) func(*core.RegionInfo) bool {
+	return func(region *core.RegionInfo) bool { return IsRegionHealthy(cluster, region) }
+}
+
+// HealthAllowPending returns a function that checks if a region is
+// healthy for scheduling. Unlike HealthRegion, it allows the region
+// to have pending peers.
+func HealthAllowPending(cluster Cluster) func(*core.RegionInfo) bool {
+	return func(region *core.RegionInfo) bool { return IsHealthyAllowPending(cluster, region) }
+}
+
+// IsRegionReplicated checks if a region is fully replicated. When placement
+// rules are enabled, the region's peers must fit the corresponding rules; when
+// they are disabled, it must have the configured replica count and no learner peers.
+func IsRegionReplicated(cluster Cluster, region *core.RegionInfo) bool {
+	if cluster.IsPlacementRulesEnabled() {
+		return cluster.FitRegion(region).IsSatisfied()
+	}
+	return len(region.GetLearners()) == 0 && len(region.GetPeers()) == cluster.GetMaxReplicas()
+}
+
+// ReplicatedRegion returns a function that checks if a region is fully replicated.
+func ReplicatedRegion(cluster Cluster) func(*core.RegionInfo) bool {
+	return func(region *core.RegionInfo) bool { return IsRegionReplicated(cluster, region) }
+}
diff --git a/server/schedule/opt/healthy_test.go b/server/schedule/opt/healthy_test.go
new file mode 100644
index 00000000000..590ee92972d
--- /dev/null
+++ b/server/schedule/opt/healthy_test.go
@@ -0,0 +1,91 @@
+// Copyright 2019 PingCAP, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// See the License for the specific language governing permissions and +// limitations under the License. + +package opt + +import ( + "testing" + + . "github.com/pingcap/check" + "github.com/pingcap/kvproto/pkg/metapb" + "github.com/pingcap/kvproto/pkg/pdpb" + "github.com/pingcap/pd/pkg/mock/mockcluster" + "github.com/pingcap/pd/pkg/mock/mockoption" + "github.com/pingcap/pd/server/core" +) + +func TestOpt(t *testing.T) { + TestingT(t) +} + +var _ = Suite(&testRegionHealthySuite{}) + +type testRegionHealthySuite struct{} + +func (s *testRegionHealthySuite) TestIsRegionHealthy(c *C) { + c.Skip("enable it after rule fit merged") + + peers := func(ids ...uint64) []*metapb.Peer { + var peers []*metapb.Peer + for _, id := range ids { + p := &metapb.Peer{ + Id: id, + StoreId: id, + } + peers = append(peers, p) + } + return peers + } + + region := func(peers []*metapb.Peer, opts ...core.RegionCreateOption) *core.RegionInfo { + return core.NewRegionInfo(&metapb.Region{Peers: peers}, peers[0], opts...) + } + + type testCase struct { + region *core.RegionInfo + // disable placement rules + healthy1 bool + healthyAllowPending1 bool + replicated1 bool + // enable placement rules + healthy2 bool + healthyAllowPending2 bool + replicated2 bool + } + + cases := []testCase{ + {region(peers(1, 2, 3)), true, true, true, true, true, true}, + {region(peers(1, 2, 3), core.WithPendingPeers(peers(1))), false, true, true, false, true, true}, + {region(peers(1, 2, 3), core.WithLearners(peers(1))), false, false, false, true, true, false}, + {region(peers(1, 2, 3), core.WithDownPeers([]*pdpb.PeerStats{{Peer: peers(1)[0]}})), false, false, true, false, false, true}, + {region(peers(1, 2)), true, true, false, true, true, false}, + {region(peers(1, 2, 3, 4), core.WithLearners(peers(1))), false, false, false, true, true, false}, + } + + opt := mockoption.NewScheduleOptions() + tc := mockcluster.NewCluster(opt) + tc.AddRegionStore(1, 1) + tc.AddRegionStore(2, 1) + tc.AddRegionStore(3, 1) + tc.AddRegionStore(4, 1) + for _, t := range cases { + opt.EnablePlacementRules = false + c.Assert(IsRegionHealthy(tc, t.region), Equals, t.healthy1) + c.Assert(IsHealthyAllowPending(tc, t.region), Equals, t.healthyAllowPending1) + c.Assert(IsRegionReplicated(tc, t.region), Equals, t.replicated1) + opt.EnablePlacementRules = true + c.Assert(IsRegionHealthy(tc, t.region), Equals, t.healthy2) + c.Assert(IsHealthyAllowPending(tc, t.region), Equals, t.healthyAllowPending2) + c.Assert(IsRegionReplicated(tc, t.region), Equals, t.replicated2) + } +} diff --git a/server/schedule/region_scatterer.go b/server/schedule/region_scatterer.go index 7424a9a9300..c9f67e25e51 100644 --- a/server/schedule/region_scatterer.go +++ b/server/schedule/region_scatterer.go @@ -89,8 +89,8 @@ func NewRegionScatterer(cluster opt.Cluster) *RegionScatterer { // Scatter relocates the region. 
 func (r *RegionScatterer) Scatter(region *core.RegionInfo) (*operator.Operator, error) {
-	if len(region.GetPeers()) != r.cluster.GetMaxReplicas() {
-		return nil, errors.Errorf("the number replicas of region %d is not expected", region.GetID())
+	if !opt.IsRegionReplicated(r.cluster, region) {
+		return nil, errors.Errorf("region %d is not fully replicated", region.GetID())
 	}
 
 	if region.GetLeader() == nil {
diff --git a/server/schedulers/adjacent_region.go b/server/schedulers/adjacent_region.go
index 6f5654f5e6f..2c29bc45b22 100644
--- a/server/schedulers/adjacent_region.go
+++ b/server/schedulers/adjacent_region.go
@@ -253,7 +253,7 @@ func (l *balanceAdjacentRegionScheduler) process(cluster opt.Cluster) []*operato
 }
 
 func (l *balanceAdjacentRegionScheduler) unsafeToBalance(cluster opt.Cluster, region *core.RegionInfo) bool {
-	if len(region.GetPeers()) != cluster.GetMaxReplicas() {
+	if !opt.IsRegionReplicated(cluster, region) {
 		return true
 	}
 	storeID := region.GetLeader().GetStoreId()
diff --git a/server/schedulers/balance_leader.go b/server/schedulers/balance_leader.go
index e16ad7c053d..08af602a7d5 100644
--- a/server/schedulers/balance_leader.go
+++ b/server/schedulers/balance_leader.go
@@ -177,7 +177,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster opt.Cluster) []*operator.Opera
 // the best follower peer and transfers the leader.
 func (l *balanceLeaderScheduler) transferLeaderOut(cluster opt.Cluster, source *core.StoreInfo) []*operator.Operator {
 	sourceID := source.GetID()
-	region := cluster.RandLeaderRegion(sourceID, l.conf.Ranges, core.HealthRegion())
+	region := cluster.RandLeaderRegion(sourceID, l.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
 		log.Debug("store has no leader", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", sourceID))
 		schedulerCounter.WithLabelValues(l.GetName(), "no-leader-region").Inc()
@@ -204,7 +204,7 @@ func (l *balanceLeaderScheduler) transferLeaderIn(cluster opt.Cluster, target *
 // the worst follower peer and transfers the leader.
 func (l *balanceLeaderScheduler) transferLeaderIn(cluster opt.Cluster, target *core.StoreInfo) []*operator.Operator {
 	targetID := target.GetID()
-	region := cluster.RandFollowerRegion(targetID, l.conf.Ranges, core.HealthRegion())
+	region := cluster.RandFollowerRegion(targetID, l.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
 		log.Debug("store has no follower", zap.String("scheduler", l.GetName()), zap.Uint64("store-id", targetID))
 		schedulerCounter.WithLabelValues(l.GetName(), "no-follower-region").Inc()
diff --git a/server/schedulers/balance_region.go b/server/schedulers/balance_region.go
index 6622c1970e1..212027e148c 100644
--- a/server/schedulers/balance_region.go
+++ b/server/schedulers/balance_region.go
@@ -135,14 +135,14 @@ func (s *balanceRegionScheduler) Schedule(cluster opt.Cluster) []*operator.Opera
 	for i := 0; i < balanceRegionRetryLimit; i++ {
 		// Priority picks the region that has a pending peer.
 		// A pending region may mean the disk is overloaded, so remove the pending region first.
-		region := cluster.RandPendingRegion(sourceID, s.conf.Ranges, core.HealthRegionAllowPending())
+		region := cluster.RandPendingRegion(sourceID, s.conf.Ranges, opt.HealthAllowPending(cluster), opt.ReplicatedRegion(cluster))
 		if region == nil {
 			// Then picks the region that has a follower in the source store.
-			region = cluster.RandFollowerRegion(sourceID, s.conf.Ranges, core.HealthRegion())
+			region = cluster.RandFollowerRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster))
 		}
 		if region == nil {
 			// Last, picks the region has the leader in the source store.
-			region = cluster.RandLeaderRegion(sourceID, s.conf.Ranges, core.HealthRegion())
+			region = cluster.RandLeaderRegion(sourceID, s.conf.Ranges, opt.HealthRegion(cluster), opt.ReplicatedRegion(cluster))
 		}
 		if region == nil {
 			schedulerCounter.WithLabelValues(s.GetName(), "no-region").Inc()
@@ -150,13 +150,6 @@ func (s *balanceRegionScheduler) Schedule(cluster opt.Cluster) []*operator.Opera
 		}
 		log.Debug("select region", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()))
 
-		// We don't schedule region with abnormal number of replicas.
-		if len(region.GetPeers()) != cluster.GetMaxReplicas() {
-			log.Debug("region has abnormal replica count", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()))
-			schedulerCounter.WithLabelValues(s.GetName(), "abnormal-replica").Inc()
-			continue
-		}
-
 		// Skip hot regions.
 		if cluster.IsRegionHot(region) {
 			log.Debug("region is hot", zap.String("scheduler", s.GetName()), zap.Uint64("region-id", region.GetID()))
diff --git a/server/schedulers/evict_leader.go b/server/schedulers/evict_leader.go
index 00da1e50a99..8ed9d7eba90 100644
--- a/server/schedulers/evict_leader.go
+++ b/server/schedulers/evict_leader.go
@@ -114,7 +114,7 @@ func (s *evictLeaderScheduler) IsScheduleAllowed(cluster opt.Cluster) bool {
 
 func (s *evictLeaderScheduler) Schedule(cluster opt.Cluster) []*operator.Operator {
 	schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc()
-	region := cluster.RandLeaderRegion(s.conf.StoreID, s.conf.Ranges, core.HealthRegion())
+	region := cluster.RandLeaderRegion(s.conf.StoreID, s.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
 		schedulerCounter.WithLabelValues(s.GetName(), "no-leader").Inc()
 		return nil
diff --git a/server/schedulers/grant_leader.go b/server/schedulers/grant_leader.go
index 721ea066c51..323a9e60ea6 100644
--- a/server/schedulers/grant_leader.go
+++ b/server/schedulers/grant_leader.go
@@ -106,7 +106,7 @@ func (s *grantLeaderScheduler) IsScheduleAllowed(cluster opt.Cluster) bool {
 
 func (s *grantLeaderScheduler) Schedule(cluster opt.Cluster) []*operator.Operator {
 	schedulerCounter.WithLabelValues(s.GetName(), "schedule").Inc()
-	region := cluster.RandFollowerRegion(s.conf.StoreID, s.conf.Ranges, core.HealthRegion())
+	region := cluster.RandFollowerRegion(s.conf.StoreID, s.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
 		schedulerCounter.WithLabelValues(s.GetName(), "no-follower").Inc()
 		return nil
diff --git a/server/schedulers/hot_region.go b/server/schedulers/hot_region.go
index 04b1364c437..2a7899c3e61 100644
--- a/server/schedulers/hot_region.go
+++ b/server/schedulers/hot_region.go
@@ -303,12 +303,12 @@ func (h *balanceHotRegionsScheduler) balanceByPeer(cluster opt.Cluster, storesSt
 			continue
 		}
 
-		if isRegionUnhealthy(srcRegion) {
+		if !opt.IsHealthyAllowPending(cluster, srcRegion) {
 			schedulerCounter.WithLabelValues(h.GetName(), "unhealthy-replica").Inc()
 			continue
 		}
 
-		if len(srcRegion.GetPeers()) != cluster.GetMaxReplicas() {
+		if !opt.IsRegionReplicated(cluster, srcRegion) {
 			log.Debug("region has abnormal replica count", zap.String("scheduler", h.GetName()), zap.Uint64("region-id", srcRegion.GetID()))
 			schedulerCounter.WithLabelValues(h.GetName(), "abnormal-replica").Inc()
 			continue
@@ -375,7 +375,7 @@ func (h *balanceHotRegionsScheduler) balanceByLeader(cluster opt.Cluster, stores
 			continue
 		}
 
-		if isRegionUnhealthy(srcRegion) {
+		if !opt.IsHealthyAllowPending(cluster, srcRegion) {
 			schedulerCounter.WithLabelValues(h.GetName(), "unhealthy-replica").Inc()
 			continue
 		}
diff --git a/server/schedulers/random_merge.go b/server/schedulers/random_merge.go
index 80574caa0f8..89c34f2ca80 100644
--- a/server/schedulers/random_merge.go
+++ b/server/schedulers/random_merge.go
@@ -100,7 +100,7 @@ func (s *randomMergeScheduler) Schedule(cluster opt.Cluster) []*operator.Operato
 		schedulerCounter.WithLabelValues(s.GetName(), "no-source-store").Inc()
 		return nil
 	}
-	region := cluster.RandLeaderRegion(store.GetID(), s.conf.Ranges, core.HealthRegion())
+	region := cluster.RandLeaderRegion(store.GetID(), s.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
 		schedulerCounter.WithLabelValues(s.GetName(), "no-region").Inc()
 		return nil
diff --git a/server/schedulers/shuffle_leader.go b/server/schedulers/shuffle_leader.go
index f56ecfba38f..36e80e508d2 100644
--- a/server/schedulers/shuffle_leader.go
+++ b/server/schedulers/shuffle_leader.go
@@ -101,7 +101,7 @@ func (s *shuffleLeaderScheduler) Schedule(cluster opt.Cluster) []*operator.Opera
 		schedulerCounter.WithLabelValues(s.GetName(), "no-target-store").Inc()
 		return nil
 	}
-	region := cluster.RandFollowerRegion(targetStore.GetID(), s.conf.Ranges, core.HealthRegion())
+	region := cluster.RandFollowerRegion(targetStore.GetID(), s.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
 		schedulerCounter.WithLabelValues(s.GetName(), "no-follower").Inc()
 		return nil
diff --git a/server/schedulers/shuffle_region.go b/server/schedulers/shuffle_region.go
index 1946bea53d0..9b1ee6bc298 100644
--- a/server/schedulers/shuffle_region.go
+++ b/server/schedulers/shuffle_region.go
@@ -126,9 +126,9 @@ func (s *shuffleRegionScheduler) scheduleRemovePeer(cluster opt.Cluster) (*core.
 		return nil, nil
 	}
 
-	region := cluster.RandFollowerRegion(source.GetID(), s.conf.Ranges, core.HealthRegion())
+	region := cluster.RandFollowerRegion(source.GetID(), s.conf.Ranges, opt.HealthRegion(cluster))
 	if region == nil {
-		region = cluster.RandLeaderRegion(source.GetID(), s.conf.Ranges, core.HealthRegion())
+		region = cluster.RandLeaderRegion(source.GetID(), s.conf.Ranges, opt.HealthRegion(cluster))
 	}
 	if region == nil {
 		schedulerCounter.WithLabelValues(s.GetName(), "no-region").Inc()
diff --git a/server/schedulers/utils.go b/server/schedulers/utils.go
index 85cc63c1d3c..800a124e560 100644
--- a/server/schedulers/utils.go
+++ b/server/schedulers/utils.go
@@ -59,10 +59,6 @@ func minDuration(a, b time.Duration) time.Duration {
 	return b
 }
 
-func isRegionUnhealthy(region *core.RegionInfo) bool {
-	return len(region.GetDownPeers()) != 0 || len(region.GetLearners()) != 0
-}
-
 func shouldBalance(cluster opt.Cluster, source, target *core.StoreInfo, region *core.RegionInfo, kind core.ScheduleKind, opInfluence operator.OpInfluence, scheduleName string) bool {
 	// The reason we use max(regionSize, averageRegionSize) to check is:
 	// 1. prevent moving small regions between stores with close scores, leading to unnecessary balance.
diff --git a/server/schedulers/utils_test.go b/server/schedulers/utils_test.go
index 9836ee0a54a..3db2ee023fb 100644
--- a/server/schedulers/utils_test.go
+++ b/server/schedulers/utils_test.go
@@ -18,9 +18,6 @@ import (
 	"time"
 
 	. "github.com/pingcap/check"
-	"github.com/pingcap/kvproto/pkg/metapb"
-	"github.com/pingcap/kvproto/pkg/pdpb"
-	"github.com/pingcap/pd/server/core"
 )
 
 const (
@@ -53,32 +50,3 @@ func (s *testMinMaxSuite) TestMinDuration(c *C) {
 	c.Assert(minDuration(time.Second, time.Minute), Equals, time.Second)
 	c.Assert(minDuration(time.Second, time.Second), Equals, time.Second)
 }
-
-var _ = Suite(&testRegionUnhealthySuite{})
-
-type testRegionUnhealthySuite struct{}
-
-func (s *testRegionUnhealthySuite) TestIsRegionUnhealthy(c *C) {
-	peers := make([]*metapb.Peer, 0, 3)
-	for i := uint64(0); i < 2; i++ {
-		p := &metapb.Peer{
-			Id:      i,
-			StoreId: i,
-		}
-		peers = append(peers, p)
-	}
-	peers = append(peers, &metapb.Peer{
-		Id:        2,
-		StoreId:   2,
-		IsLearner: true,
-	})
-
-	r1 := core.NewRegionInfo(&metapb.Region{Peers: peers[:2]}, peers[0], core.WithDownPeers([]*pdpb.PeerStats{{Peer: peers[1]}}))
-	r2 := core.NewRegionInfo(&metapb.Region{Peers: peers[:2]}, peers[0], core.WithPendingPeers([]*metapb.Peer{peers[1]}))
-	r3 := core.NewRegionInfo(&metapb.Region{Peers: peers[:3]}, peers[0], core.WithLearners([]*metapb.Peer{peers[2]}))
-	r4 := core.NewRegionInfo(&metapb.Region{Peers: peers[:2]}, peers[0])
-	c.Assert(isRegionUnhealthy(r1), IsTrue)
-	c.Assert(isRegionUnhealthy(r2), IsFalse)
-	c.Assert(isRegionUnhealthy(r3), IsTrue)
-	c.Assert(isRegionUnhealthy(r4), IsFalse)
-}