fix multiple overflow errors in exponential backoff #18200

Merged: merged 8 commits on Aug 15, 2023

3 changes: 3 additions & 0 deletions .changelog/18200.txt
@@ -0,0 +1,3 @@
```release-note:bug
core: Fixed a bug where exponential backoff could result in excessive CPU usage
```
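
For context on the changelog entry above: the pre-existing pattern computed `1 << (2 * retry)` before checking the cap, so once the retry counter grew large enough the shift wrapped around, producing a zero or negative duration and a retry loop that spun without sleeping. The sketch below is illustrative only (the helper name and the `main` harness are not from the Nomad codebase); it shows the guarded shape this PR applies at each call site: the backoff is only recomputed while it is still below the limit, so the counter stops growing once the cap is reached.

```go
package main

import (
	"fmt"
	"time"
)

// nextBackoff applies the guard used in this PR: the shift is only evaluated
// while the previous backoff is below the limit, so the retry counter never
// grows far enough for 1 << (2 * retry) to overflow.
func nextBackoff(prev, base, limit time.Duration, retry uint64) (time.Duration, uint64) {
	if prev >= limit {
		// Already capped: keep the cap and stop incrementing the counter.
		return limit, retry
	}
	backoff := (1 << (2 * retry)) * base
	if backoff > limit {
		backoff = limit
	}
	return backoff, retry + 1
}

func main() {
	backoff, retry := time.Duration(0), uint64(0)
	for i := 0; i < 6; i++ {
		backoff, retry = nextBackoff(backoff, time.Second, 5*time.Second, retry)
		fmt.Println(backoff) // 1s, 4s, 5s, 5s, 5s, 5s
	}
}
```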
11 changes: 7 additions & 4 deletions client/allocrunner/taskrunner/stats_hook.go
@@ -128,6 +128,8 @@ MAIN:
// It logs the errors with appropriate log levels; don't log returned error
func (h *statsHook) callStatsWithRetry(ctx context.Context, handle interfaces.DriverStats) (<-chan *cstructs.TaskResourceUsage, error) {
var retry int
backoff := time.Duration(0)
limit := time.Second * 5

MAIN:
if ctx.Err() != nil {
@@ -162,10 +164,11 @@ MAIN:
h.logger.Error("failed to start stats collection for task", "error", err)
}

limit := time.Second * 5
backoff := 1 << (2 * uint64(retry)) * time.Second
if backoff > limit || retry > 5 {
backoff = limit
if backoff < limit {
backoff = 1 << (2 * uint64(retry)) * time.Second
if backoff > limit || retry > 5 {
backoff = limit
}
}

// Increment retry counter
12 changes: 7 additions & 5 deletions client/allocrunner/taskrunner/vault_hook.go
@@ -312,6 +312,7 @@ OUTER:
// returns the Vault token and whether the manager should exit.
func (h *vaultHook) deriveVaultToken() (token string, exit bool) {
attempts := 0
backoff := time.Duration(0)
for {
tokens, err := h.client.DeriveToken(h.alloc, []string{h.taskName})
if err == nil {
@@ -339,14 +340,15 @@ func (h *vaultHook) deriveVaultToken() (token string, exit bool) {
}

// Handle the retry case
backoff := (1 << (2 * uint64(attempts))) * vaultBackoffBaseline
if backoff > vaultBackoffLimit {
backoff = vaultBackoffLimit
if backoff < vaultBackoffLimit {
backoff = (1 << (2 * uint64(attempts))) * vaultBackoffBaseline
if backoff > vaultBackoffLimit {
backoff = vaultBackoffLimit
}
attempts++
}
h.logger.Error("failed to derive Vault token", "error", err, "recoverable", true, "backoff", backoff)

attempts++

// Wait till retrying
select {
case <-h.ctx.Done():
13 changes: 8 additions & 5 deletions client/devicemanager/instance.go
@@ -470,6 +470,7 @@ START:

var sresp *device.StatsResponse
var ok bool
backoff := time.Duration(0)
for {
select {
case <-i.ctx.Done():
@@ -494,12 +495,14 @@ START:
goto START
}

// Retry with an exponential backoff
backoff := (1 << (2 * uint64(attempt))) * statsBackoffBaseline
if backoff > statsBackoffLimit {
backoff = statsBackoffLimit
// Retry with an exponential backoff, avoiding overflow
if backoff < statsBackoffLimit {
backoff = (1 << (2 * uint64(attempt))) * statsBackoffBaseline
if backoff > statsBackoffLimit {
backoff = statsBackoffLimit
}
attempt++
}
attempt++

i.logger.Error("stats returned an error", "error", err, "retry", backoff)

21 changes: 12 additions & 9 deletions client/pluginmanager/drivermanager/instance.go
@@ -329,12 +329,13 @@ func (i *instanceManager) fingerprint() {
i.handleFingerprintError()

// Calculate the new backoff
backoff = (1 << (2 * uint64(retry))) * driverFPBackoffBaseline
if backoff > driverFPBackoffLimit {
backoff = driverFPBackoffLimit
if backoff < driverFPBackoffLimit {
backoff = (1 << (2 * uint64(retry))) * driverFPBackoffBaseline
if backoff > driverFPBackoffLimit {
backoff = driverFPBackoffLimit
}
retry++
}
// Increment retry counter
retry++
continue
}
cancel()
@@ -453,11 +454,13 @@ func (i *instanceManager) handleEvents() {
i.logger.Warn("failed to receive task events, retrying", "error", err, "retry", retry)

// Calculate the new backoff
backoff = (1 << (2 * uint64(retry))) * driverFPBackoffBaseline
if backoff > driverFPBackoffLimit {
backoff = driverFPBackoffLimit
if backoff < driverFPBackoffLimit {
backoff = (1 << (2 * uint64(retry))) * driverFPBackoffBaseline
if backoff > driverFPBackoffLimit {
backoff = driverFPBackoffLimit
}
retry++
}
retry++
continue
}
cancel()
13 changes: 8 additions & 5 deletions command/agent/consul/version_checker.go
@@ -27,6 +27,7 @@ func checkConsulTLSSkipVerify(ctx context.Context, logger log.Logger, client Age

timer, stop := helper.NewSafeTimer(limit)
defer stop()
backoff := time.Duration(0)

for {
self, err := client.Self()
@@ -40,11 +41,13 @@ func checkConsulTLSSkipVerify(ctx context.Context, logger log.Logger, client Age
return
}

backoff := (1 << (2 * i)) * baseline
if backoff > limit {
backoff = limit
} else {
i++
if backoff < limit {
backoff = (1 << (2 * i)) * baseline
if backoff > limit {
backoff = limit
} else {
i++
}
}

timer.Reset(backoff)
3 changes: 2 additions & 1 deletion drivers/docker/driver.go
@@ -572,7 +572,8 @@ START:
return recoverableErrTimeouts(startErr)
}

// nextBackoff returns appropriate docker backoff durations after attempted attempts.
// nextBackoff returns appropriate docker backoff durations after attempted
// attempts. Note that the caller must cap attempted so that it doesn't overflow.
func nextBackoff(attempted int) time.Duration {
// attempts in 200ms, 800ms, 3.2s, 12.8s, 51.2s
// TODO: add randomization factor and extract to a helper
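
The doc comment above documents a 200ms, 800ms, 3.2s, 12.8s, 51.2s schedule, i.e. a 200ms baseline multiplied by 4 per attempt, and leaves capping the attempt count to the caller. A rough sketch of a helper with that schedule and the cap folded in is shown below; the constant names, the 51.2s cap, and the function name are assumptions for illustration, not the actual docker driver implementation.

```go
package main

import (
	"fmt"
	"time"
)

// Assumed values for illustration only.
const (
	dockerBackoffBaseline = 200 * time.Millisecond
	dockerBackoffLimit    = 51200 * time.Millisecond // 51.2s, the last step of the schedule
)

// nextBackoffCapped reproduces the baseline * 4^attempted schedule but clamps
// the attempt count so a large value cannot overflow the shift.
func nextBackoffCapped(attempted uint64) time.Duration {
	if attempted > 4 {
		// Beyond the documented schedule: stay at the cap instead of shifting
		// further, which would eventually wrap around.
		return dockerBackoffLimit
	}
	return (1 << (2 * attempted)) * dockerBackoffBaseline
}

func main() {
	for a := uint64(0); a < 8; a++ {
		fmt.Println(nextBackoffCapped(a)) // 200ms, 800ms, 3.2s, 12.8s, 51.2s, 51.2s, ...
	}
}
```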
11 changes: 6 additions & 5 deletions drivers/docker/stats.go
@@ -137,12 +137,13 @@ func (h *taskHandle) collectStats(ctx context.Context, destCh *usageSender, inte
h.logger.Debug("error collecting stats from container", "error", err)

// Calculate the new backoff
backoff = (1 << (2 * uint64(retry))) * statsCollectorBackoffBaseline
if backoff > statsCollectorBackoffLimit {
backoff = statsCollectorBackoffLimit
if backoff < statsCollectorBackoffLimit {
backoff = (1 << (2 * uint64(retry))) * statsCollectorBackoffBaseline
if backoff > statsCollectorBackoffLimit {
backoff = statsCollectorBackoffLimit
}
retry++
}
// Increment retry counter
retry++
continue
}
// Stats finished either because context was canceled, doneCh was closed
9 changes: 6 additions & 3 deletions nomad/state/state_store.go
@@ -256,6 +256,7 @@ func (s *StateStore) SnapshotMinIndex(ctx context.Context, index uint64) (*State
const backoffLimit = 1 * time.Second
var retries uint
var retryTimer *time.Timer
deadline := time.Duration(0)

// XXX: Potential optimization is to set up a watch on the state
// store's index table and only unblock via a trigger rather than
@@ -279,9 +280,11 @@ func (s *StateStore) SnapshotMinIndex(ctx context.Context, index uint64) (*State
retryTimer = time.NewTimer(backoffBase)
} else {
// Subsequent retry, reset timer
deadline := 1 << (2 * retries) * backoffBase
if deadline > backoffLimit {
deadline = backoffLimit
if deadline < backoffLimit {
deadline = 1 << (2 * retries) * backoffBase
if deadline > backoffLimit {
deadline = backoffLimit
}
}
retryTimer.Reset(deadline)
}
21 changes: 14 additions & 7 deletions nomad/worker.go
@@ -109,8 +109,9 @@ type Worker struct {

// failures is the count of errors encountered while dequeueing evaluations
// and is used to calculate backoff.
failures uint
evalToken string
failures uint
failureBackoff time.Duration
evalToken string

// snapshotIndex is the index of the snapshot in which the scheduler was
// first invoked. It is used to mark the SnapshotIndex of evaluations
@@ -133,6 +134,7 @@ func newWorker(ctx context.Context, srv *Server, args SchedulerWorkerPoolArgs) *
start: time.Now(),
status: WorkerStarting,
enabledSchedulers: make([]string, len(args.EnabledSchedulers)),
failureBackoff: time.Duration(0),
}
copy(w.enabledSchedulers, args.EnabledSchedulers)

@@ -874,12 +876,17 @@ func (w *Worker) shouldResubmit(err error) bool {
// backoff if the server or the worker is shutdown.
func (w *Worker) backoffErr(base, limit time.Duration) bool {
w.setWorkloadStatus(WorkloadBackoff)
backoff := (1 << (2 * w.failures)) * base
if backoff > limit {
backoff = limit
} else {
w.failures++

backoff := w.failureBackoff
if w.failureBackoff < limit {
backoff = (1 << (2 * w.failures)) * base
if backoff > limit {
backoff = limit
} else {
w.failures++
}
}

select {
case <-time.After(backoff):
return false
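
Unlike the other call sites, the worker keeps its previous backoff in a struct field (`failureBackoff`) because `backoffErr` is called across separate evaluation attempts rather than inside one loop. A minimal sketch of that shape, assuming the field is persisted after each call and reset alongside `failures` after a successful dequeue (neither is visible in this hunk):

```go
package main

import (
	"fmt"
	"time"
)

// worker is a stripped-down stand-in for illustration; only the fields
// involved in backoff are shown.
type worker struct {
	failures       uint
	failureBackoff time.Duration
}

// backoffErr mirrors the guarded pattern in this PR: the backoff is only
// recomputed while the stored value is below the limit, so failures stops
// growing and the shift cannot overflow.
func (w *worker) backoffErr(base, limit time.Duration) time.Duration {
	backoff := w.failureBackoff
	if w.failureBackoff < limit {
		backoff = (1 << (2 * w.failures)) * base
		if backoff > limit {
			backoff = limit
		} else {
			w.failures++
		}
	}
	w.failureBackoff = backoff // assumption: persisted for the next call
	return backoff
}

func main() {
	w := &worker{}
	for i := 0; i < 6; i++ {
		fmt.Println(w.backoffErr(time.Second, 5*time.Second)) // 1s, 4s, 5s, 5s, 5s, 5s
	}
}
```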