Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[release-0.14] 🐛 Allow to set GracefulShutdownTimeout to -1, disabling timeouts #2198

Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion pkg/manager/internal.go
Original file line number Diff line number Diff line change
Expand Up @@ -528,7 +528,12 @@ func (cm *controllerManager) engageStopProcedure(stopComplete <-chan struct{}) e
//
// The shutdown context immediately expires if the gracefulShutdownTimeout is not set.
var shutdownCancel context.CancelFunc
cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
if cm.gracefulShutdownTimeout < 0 {
// We want to wait forever for the runnables to stop.
cm.shutdownCtx, shutdownCancel = context.WithCancel(context.Background())
} else {
cm.shutdownCtx, shutdownCancel = context.WithTimeout(context.Background(), cm.gracefulShutdownTimeout)
}
defer shutdownCancel()

// Start draining the errors before acquiring the lock to make sure we don't deadlock
Expand Down
44 changes: 44 additions & 0 deletions pkg/manager/manager_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1076,6 +1076,50 @@ var _ = Describe("manger.Manager", func() {
<-runnableStopped
})

// Regression test: when gracefulShutdownTimeout is negative (-1), the
// manager must wait indefinitely for all runnables to finish instead of
// cancelling the shutdown context after a timeout.
// NOTE(review): relies on suite-level fixtures (cfg, options, callbacks)
// and Ginkgo/Gomega defined elsewhere in this file.
It("should wait forever for runnables if gracefulShutdownTimeout is <0 (-1)", func() {
m, err := New(cfg, options)
Expect(err).NotTo(HaveOccurred())
// Apply the suite's shared per-test setup callbacks to the manager.
for _, cb := range callbacks {
cb(m)
}
// Reach into the concrete type to force "wait forever" shutdown mode.
m.(*controllerManager).gracefulShutdownTimeout = time.Duration(-1)

// Add four runnables that keep working for increasing durations after
// the manager context is cancelled; the slowest (1500ms) bounds the
// minimum shutdown time asserted below.
Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
<-ctx.Done()
time.Sleep(100 * time.Millisecond)
return nil
}))).ToNot(HaveOccurred())
Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
<-ctx.Done()
time.Sleep(200 * time.Millisecond)
return nil
}))).ToNot(HaveOccurred())
Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
<-ctx.Done()
time.Sleep(500 * time.Millisecond)
return nil
}))).ToNot(HaveOccurred())
Expect(m.Add(RunnableFunc(func(ctx context.Context) error {
<-ctx.Done()
time.Sleep(1500 * time.Millisecond)
return nil
}))).ToNot(HaveOccurred())

// Start the manager in the background; Start blocks until shutdown
// completes, so managerStopDone signals full shutdown.
ctx, cancel := context.WithCancel(context.Background())
managerStopDone := make(chan struct{})
go func() {
defer GinkgoRecover()
Expect(m.Start(ctx)).NotTo(HaveOccurred())
close(managerStopDone)
}()
// Wait until the manager is actually running before cancelling.
<-m.Elected()
cancel()

// Shutdown must have waited for the slowest runnable (1500ms); with a
// finite timeout it would have returned earlier.
beforeDone := time.Now()
<-managerStopDone
Expect(time.Since(beforeDone)).To(BeNumerically(">=", 1500*time.Millisecond))
})

}

Context("with defaults", func() {
Expand Down