scheduler: make evict-slow-trend scheduler support manually modifying recovery duration (#7132)

ref #7156, ref tikv/tikv#15271

With this PR, users can manually modify the minimum recovery time used when a store encounters I/O jitter.

That is, only when the jitter has disappeared and the recovery time has reached this limit can the given slow node be marked as normal again, allowing leaders to be balanced back to it.
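
For example, once the scheduler is added, the limit can be tuned and inspected through pd-ctl; these are the same commands exercised by the new test in tests/pdctl/scheduler/scheduler_test.go, with the value given in seconds:

scheduler config evict-slow-trend-scheduler set recovery-duration 100
scheduler config evict-slow-trend-scheduler show

Under the hood, the "set" sub-command posts {"recovery-duration": 100} to the /config HTTP endpoint registered by the new scheduler handler below.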

Signed-off-by: lucasliang <nkcs_lykx@hotmail.com>
LykxSassinator authored Oct 12, 2023
1 parent 779b5be commit 0adb86f
Showing 5 changed files with 118 additions and 20 deletions.
99 changes: 81 additions & 18 deletions pkg/schedule/schedulers/evict_slow_trend.go
@@ -15,9 +15,12 @@
package schedulers

import (
"net/http"
"strconv"
"sync/atomic"
"time"

"github.com/gorilla/mux"
"github.com/pingcap/errors"
"github.com/pingcap/failpoint"
"github.com/pingcap/log"
@@ -26,6 +29,8 @@ import (
"github.com/tikv/pd/pkg/schedule/operator"
"github.com/tikv/pd/pkg/schedule/plan"
"github.com/tikv/pd/pkg/storage/endpoint"
"github.com/tikv/pd/pkg/utils/apiutil"
"github.com/unrolled/render"
"go.uber.org/zap"
)

@@ -54,11 +59,28 @@ type evictSlowTrendSchedulerConfig struct {
evictCandidate slowCandidate
// Last chosen candidate for eviction.
lastEvictCandidate slowCandidate

// Duration gap for recovering the candidate, unit: s.
RecoveryDurationGap uint64 `json:"recovery-duration"`
// Only evict one store for now
EvictedStores []uint64 `json:"evict-by-trend-stores"`
}

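// initEvictSlowTrendSchedulerConfig returns the default scheduler config: the default recovery duration and an empty evicted-store list.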
func initEvictSlowTrendSchedulerConfig(storage endpoint.ConfigStorage) *evictSlowTrendSchedulerConfig {
return &evictSlowTrendSchedulerConfig{
storage: storage,
evictCandidate: slowCandidate{},
lastEvictCandidate: slowCandidate{},
RecoveryDurationGap: defaultRecoveryDurationGap,
EvictedStores: make([]uint64, 0),
}
}

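// Clone returns a copy of the config containing only the atomically loaded recovery duration.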
func (conf *evictSlowTrendSchedulerConfig) Clone() *evictSlowTrendSchedulerConfig {
return &evictSlowTrendSchedulerConfig{
RecoveryDurationGap: atomic.LoadUint64(&conf.RecoveryDurationGap),
}
}

func (conf *evictSlowTrendSchedulerConfig) Persist() error {
name := conf.getSchedulerName()
data, err := EncodeConfig(conf)
@@ -116,6 +138,15 @@ func (conf *evictSlowTrendSchedulerConfig) lastCandidateCapturedSecs() uint64 {
return DurationSinceAsSecs(conf.lastEvictCandidate.captureTS)
}

// readyForRecovery checks whether the last captured candidate is ready for recovery.
func (conf *evictSlowTrendSchedulerConfig) readyForRecovery() bool {
recoveryDurationGap := atomic.LoadUint64(&conf.RecoveryDurationGap)
failpoint.Inject("transientRecoveryGap", func() {
recoveryDurationGap = 0
})
return conf.lastCandidateCapturedSecs() >= recoveryDurationGap
}

func (conf *evictSlowTrendSchedulerConfig) captureCandidate(id uint64) {
conf.evictCandidate = slowCandidate{
storeID: id,
@@ -162,9 +193,52 @@ func (conf *evictSlowTrendSchedulerConfig) clearAndPersist(cluster sche.Schedule
return oldID, conf.Persist()
}

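// evictSlowTrendHandler exposes the scheduler's configuration over HTTP.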
type evictSlowTrendHandler struct {
rd *render.Render
config *evictSlowTrendSchedulerConfig
}

func newEvictSlowTrendHandler(config *evictSlowTrendSchedulerConfig) http.Handler {
h := &evictSlowTrendHandler{
config: config,
rd: render.New(render.Options{IndentJSON: true}),
}
router := mux.NewRouter()
router.HandleFunc("/config", h.UpdateConfig).Methods(http.MethodPost)
router.HandleFunc("/list", h.ListConfig).Methods(http.MethodGet)
return router
}

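// UpdateConfig handles POST /config: it parses 'recovery-duration' (in seconds) from the JSON body and atomically replaces the stored value.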
func (handler *evictSlowTrendHandler) UpdateConfig(w http.ResponseWriter, r *http.Request) {
var input map[string]interface{}
if err := apiutil.ReadJSONRespondError(handler.rd, w, r.Body, &input); err != nil {
return
}
recoveryDurationGapFloat, ok := input["recovery-duration"].(float64)
if !ok {
handler.rd.JSON(w, http.StatusInternalServerError, errors.New("invalid argument for 'recovery-duration'").Error())
return
}
recoveryDurationGap := (uint64)(recoveryDurationGapFloat)
prevRecoveryDurationGap := atomic.LoadUint64(&handler.config.RecoveryDurationGap)
atomic.StoreUint64(&handler.config.RecoveryDurationGap, recoveryDurationGap)
log.Info("evict-slow-trend-scheduler update 'recovery-duration' - unit: s", zap.Uint64("prev", prevRecoveryDurationGap), zap.Uint64("cur", recoveryDurationGap))
handler.rd.JSON(w, http.StatusOK, nil)
}

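// ListConfig handles GET /list and returns a snapshot of the current configuration.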
func (handler *evictSlowTrendHandler) ListConfig(w http.ResponseWriter, r *http.Request) {
conf := handler.config.Clone()
handler.rd.JSON(w, http.StatusOK, conf)
}

type evictSlowTrendScheduler struct {
*BaseScheduler
- conf *evictSlowTrendSchedulerConfig
+ conf    *evictSlowTrendSchedulerConfig
+ handler http.Handler
}

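// ServeHTTP delegates HTTP requests to the scheduler's config handler.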
func (s *evictSlowTrendScheduler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
s.handler.ServeHTTP(w, r)
}

func (s *evictSlowTrendScheduler) GetName() string {
@@ -244,7 +318,7 @@ func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, dryRun
// slow node next time.
log.Info("store evicted by slow trend has been removed", zap.Uint64("store-id", store.GetID()))
storeSlowTrendActionStatusGauge.WithLabelValues("evict", "stop_removed").Inc()
- } else if checkStoreCanRecover(cluster, store, s.conf.lastCandidateCapturedSecs()) {
+ } else if checkStoreCanRecover(cluster, store) && s.conf.readyForRecovery() {
log.Info("store evicted by slow trend has been recovered", zap.Uint64("store-id", store.GetID()))
storeSlowTrendActionStatusGauge.WithLabelValues("evict", "stop_recovered").Inc()
} else {
@@ -301,9 +375,11 @@ func (s *evictSlowTrendScheduler) Schedule(cluster sche.SchedulerCluster, dryRun
}

func newEvictSlowTrendScheduler(opController *operator.Controller, conf *evictSlowTrendSchedulerConfig) Scheduler {
handler := newEvictSlowTrendHandler(conf)
return &evictSlowTrendScheduler{
BaseScheduler: NewBaseScheduler(opController),
conf: conf,
handler: handler,
}
}

@@ -453,7 +529,7 @@ func checkStoreSlowerThanOthers(cluster sche.SchedulerCluster, target *core.Stor
return slowerThanStoresNum >= expected
}

- func checkStoreCanRecover(cluster sche.SchedulerCluster, target *core.StoreInfo, recoveryGap uint64) bool {
+ func checkStoreCanRecover(cluster sche.SchedulerCluster, target *core.StoreInfo) bool {
/*
//
// This might not be necessary,
@@ -473,7 +549,7 @@ func checkStoreCanRecover(cluster sche.SchedulerCluster, target *core.StoreInfo,
storeSlowTrendActionStatusGauge.WithLabelValues("recover.judging:got-event").Inc()
}
*/
- return checkStoreFasterThanOthers(cluster, target) && checkStoreReadyForRecover(target, recoveryGap)
+ return checkStoreFasterThanOthers(cluster, target)
}

func checkStoreFasterThanOthers(cluster sche.SchedulerCluster, target *core.StoreInfo) bool {
@@ -507,19 +583,6 @@ func checkStoreFasterThanOthers(cluster sche.SchedulerCluster, target *core.Stor
return fasterThanStores >= expected
}

// checkStoreReadyForRecover checks whether the given target store is ready for recover.
func checkStoreReadyForRecover(target *core.StoreInfo, recoveryGap uint64) bool {
durationGap := uint64(defaultRecoveryDurationGap)
failpoint.Inject("transientRecoveryGap", func() {
durationGap = 0
})
if targetSlowTrend := target.GetSlowTrend(); targetSlowTrend != nil {
// TODO: setting the recovery time in SlowTrend
return recoveryGap >= durationGap
}
return true
}

// DurationSinceAsSecs returns the duration gap since the given startTS, unit: s.
func DurationSinceAsSecs(startTS time.Time) uint64 {
return uint64(time.Since(startTS).Seconds())
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/evict_slow_trend_test.go
@@ -93,7 +93,7 @@ func (suite *evictSlowTrendTestSuite) TestEvictSlowTrendBasicFuncs() {
suite.Equal(*lastCapturedCandidate, es2.conf.evictCandidate)
suite.Equal(es2.conf.candidateCapturedSecs(), uint64(0))
suite.Equal(es2.conf.lastCandidateCapturedSecs(), uint64(0))
- suite.False(checkStoreReadyForRecover(store, es2.conf.lastCandidateCapturedSecs()))
+ suite.False(es2.conf.readyForRecovery())
recoverTS := lastCapturedCandidate.recoverTS
suite.True(recoverTS.After(lastCapturedCandidate.captureTS))
// Pop captured store 1 and mark it has recovered.
2 changes: 1 addition & 1 deletion pkg/schedule/schedulers/init.go
@@ -466,7 +466,7 @@ func schedulersRegister() {
})

RegisterScheduler(EvictSlowTrendType, func(opController *operator.Controller, storage endpoint.ConfigStorage, decoder ConfigDecoder, removeSchedulerCb ...func(string) error) (Scheduler, error) {
- conf := &evictSlowTrendSchedulerConfig{storage: storage, EvictedStores: make([]uint64, 0), evictCandidate: slowCandidate{}, lastEvictCandidate: slowCandidate{}}
+ conf := initEvictSlowTrendSchedulerConfig(storage)
if err := decoder(conf); err != nil {
return nil, err
}
15 changes: 15 additions & 0 deletions tests/pdctl/scheduler/scheduler_test.go
@@ -407,6 +407,21 @@ func (suite *schedulerTestSuite) checkScheduler(cluster *tests.TestCluster) {
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "add", "balance-leader-scheduler"}, nil)
re.Contains(echo, "Success!")

// test evict-slow-trend scheduler config
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "add", "evict-slow-trend-scheduler"}, nil)
re.Contains(echo, "Success!")
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, nil)
re.Contains(echo, "evict-slow-trend-scheduler")
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "evict-slow-trend-scheduler", "set", "recovery-duration", "100"}, nil)
re.Contains(echo, "Success!")
conf = make(map[string]interface{})
mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "config", "evict-slow-trend-scheduler", "show"}, &conf)
re.Equal(100., conf["recovery-duration"])
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "remove", "evict-slow-trend-scheduler"}, nil)
re.Contains(echo, "Success!")
echo = mustExec(re, cmd, []string{"-u", pdAddr, "scheduler", "show"}, nil)
re.NotContains(echo, "evict-slow-trend-scheduler")

// test show scheduler with paused and disabled status.
checkSchedulerWithStatusCommand := func(status string, expected []string) {
var schedulers []string
20 changes: 20 additions & 0 deletions tools/pd-ctl/pdctl/command/scheduler.go
@@ -499,6 +499,7 @@ func NewConfigSchedulerCommand() *cobra.Command {
newConfigGrantHotRegionCommand(),
newConfigBalanceLeaderCommand(),
newSplitBucketCommand(),
newConfigEvictSlowTrendCommand(),
)
return c
}
@@ -775,6 +776,25 @@ func setShuffleRegionSchedulerRolesCommandFunc(cmd *cobra.Command, args []string
cmd.Println("Success!")
}

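// newConfigEvictSlowTrendCommand returns the pd-ctl sub-command for showing and updating the evict-slow-trend scheduler config.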
func newConfigEvictSlowTrendCommand() *cobra.Command {
c := &cobra.Command{
Use: "evict-slow-trend-scheduler",
Short: "evict-slow-trend-scheduler config",
Run: listSchedulerConfigCommandFunc,
}

c.AddCommand(&cobra.Command{
Use: "show",
Short: "list the config item",
Run: listSchedulerConfigCommandFunc,
}, &cobra.Command{
Use: "set <key> <value>",
Short: "set the config item",
Run: func(cmd *cobra.Command, args []string) { postSchedulerConfigCommandFunc(cmd, c.Name(), args) },
})
return c
}

// NewDescribeSchedulerCommand returns command to describe the scheduler.
func NewDescribeSchedulerCommand() *cobra.Command {
c := &cobra.Command{
