time: clean up benchmarks
Comparing BenchmarkStop against very old commits like
CL 13094043, I was very confused about how timers had
gotten almost 10X slower since 2013.

It turns out that CL 68060043 introduced a factor of 1000
in the benchmark cost, by counting batches of 1000 as 1 op
instead of 1000 ops, and timers have actually gotten
dramatically faster since 2013, with the addition of per-P
timer heaps and other optimizations.

This CL rewrites the benchmarks to use testing.PB directly,
so that the factor of 1000 disappears, and "/op" really means "/op".
In the few tests that need to run in batches for one reason or
another, add "1000" to the name to make clear that batches
are being run.
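
For illustration only, and not part of this CL: a minimal sketch of the old and new benchmark shapes. The package name, the BenchmarkAfterOld/BenchmarkAfterNew names, and the use of the exported time.After (instead of the in-package After used by sleep_test.go) are all hypothetical.

// sketch_test.go (illustrative sketch; package and names are hypothetical)
package sketch

import (
	"testing"
	"time"
)

// Old shape: each pb.Next() runs a batch of 1000 timer waits but is counted
// as a single op, so the reported ns/op is roughly 1000x the per-timer cost.
func BenchmarkAfterOld(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < 1000; i++ {
				<-time.After(1)
			}
		}
	})
}

// New shape: one timer wait per pb.Next(), so "/op" means one timer operation.
func BenchmarkAfterNew(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			<-time.After(1)
		}
	})
}

Running both with "go test -bench=." would show the old shape reporting roughly 1000x the ns/op of the new shape for the same per-timer cost, which is the factor of 1000 described above.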

Change-Id: I27ed74d1e420934982e4205aad4f218cdfc42509
Reviewed-on: https://go-review.googlesource.com/c/go/+/570495
Auto-Submit: Russ Cox <rsc@golang.org>
Reviewed-by: Ian Lance Taylor <iant@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
rsc authored and pull[bot] committed Apr 12, 2024
1 parent 4a7960d commit b04064c
Showing 2 changed files with 67 additions and 61 deletions.
116 changes: 61 additions & 55 deletions src/time/sleep_test.go
@@ -148,8 +148,7 @@ func TestAfterFuncStarvation(t *testing.T) {
 	wg.Wait()
 }
 
-func benchmark(b *testing.B, bench func(n int)) {
-
+func benchmark(b *testing.B, bench func(*testing.PB)) {
 	// Create equal number of garbage timers on each P before starting
 	// the benchmark.
 	var wg sync.WaitGroup
@@ -168,11 +167,7 @@ func benchmark(b *testing.B, bench func(n int)) {
 	wg.Wait()
 
 	b.ResetTimer()
-	b.RunParallel(func(pb *testing.PB) {
-		for pb.Next() {
-			bench(1000)
-		}
-	})
+	b.RunParallel(bench)
 	b.StopTimer()
 
 	for _, garbage := range garbageAll {
@@ -182,105 +177,116 @@
 	}
 }
 
-func BenchmarkAfterFunc(b *testing.B) {
-	benchmark(b, func(n int) {
-		c := make(chan bool)
-		var f func()
-		f = func() {
-			n--
-			if n >= 0 {
-				AfterFunc(0, f)
-			} else {
-				c <- true
+func BenchmarkAfterFunc1000(b *testing.B) {
+	benchmark(b, func(pb *testing.PB) {
+		for pb.Next() {
+			n := 1000
+			c := make(chan bool)
+			var f func()
+			f = func() {
+				n--
+				if n >= 0 {
+					AfterFunc(0, f)
+				} else {
+					c <- true
+				}
 			}
+			AfterFunc(0, f)
+			<-c
 		}
-
-		AfterFunc(0, f)
-		<-c
 	})
 }
 
 func BenchmarkAfter(b *testing.B) {
-	benchmark(b, func(n int) {
-		for i := 0; i < n; i++ {
+	benchmark(b, func(pb *testing.PB) {
+		for pb.Next() {
 			<-After(1)
 		}
 	})
 }
 
 func BenchmarkStop(b *testing.B) {
 	b.Run("impl=chan", func(b *testing.B) {
-		benchmark(b, func(n int) {
-			for i := 0; i < n; i++ {
+		benchmark(b, func(pb *testing.PB) {
+			for pb.Next() {
 				NewTimer(1 * Second).Stop()
 			}
 		})
 	})
 	b.Run("impl=func", func(b *testing.B) {
-		benchmark(b, func(n int) {
-			for i := 0; i < n; i++ {
+		benchmark(b, func(pb *testing.PB) {
+			for pb.Next() {
 				newTimerFunc(1 * Second).Stop()
 			}
 		})
 	})
 }
 
-func BenchmarkSimultaneousAfterFunc(b *testing.B) {
-	benchmark(b, func(n int) {
-		var wg sync.WaitGroup
-		wg.Add(n)
-		for i := 0; i < n; i++ {
-			AfterFunc(0, wg.Done)
+func BenchmarkSimultaneousAfterFunc1000(b *testing.B) {
+	benchmark(b, func(pb *testing.PB) {
+		for pb.Next() {
+			n := 1000
+			var wg sync.WaitGroup
+			wg.Add(n)
+			for range n {
+				AfterFunc(0, wg.Done)
+			}
+			wg.Wait()
 		}
-		wg.Wait()
 	})
 }
 
-func BenchmarkStartStop(b *testing.B) {
-	benchmark(b, func(n int) {
-		timers := make([]*Timer, n)
-		for i := 0; i < n; i++ {
-			timers[i] = AfterFunc(Hour, nil)
-		}
+func BenchmarkStartStop1000(b *testing.B) {
+	benchmark(b, func(pb *testing.PB) {
+		for pb.Next() {
+			const N = 1000
+			timers := make([]*Timer, N)
+			for i := range timers {
+				timers[i] = AfterFunc(Hour, nil)
+			}
 
-		for i := 0; i < n; i++ {
-			timers[i].Stop()
+			for i := range timers {
+				timers[i].Stop()
+			}
 		}
 	})
 }
 
 func BenchmarkReset(b *testing.B) {
 	b.Run("impl=chan", func(b *testing.B) {
-		benchmark(b, func(n int) {
+		benchmark(b, func(pb *testing.PB) {
 			t := NewTimer(Hour)
-			for i := 0; i < n; i++ {
+			for pb.Next() {
 				t.Reset(Hour)
 			}
 			t.Stop()
 		})
 	})
 	b.Run("impl=func", func(b *testing.B) {
-		benchmark(b, func(n int) {
+		benchmark(b, func(pb *testing.PB) {
 			t := newTimerFunc(Hour)
-			for i := 0; i < n; i++ {
+			for pb.Next() {
 				t.Reset(Hour)
 			}
 			t.Stop()
 		})
 	})
 }
 
-func BenchmarkSleep(b *testing.B) {
-	benchmark(b, func(n int) {
-		var wg sync.WaitGroup
-		wg.Add(n)
-		for i := 0; i < n; i++ {
-			go func() {
-				Sleep(Nanosecond)
-				wg.Done()
-			}()
+func BenchmarkSleep1000(b *testing.B) {
+	benchmark(b, func(pb *testing.PB) {
+		for pb.Next() {
+			const N = 1000
+			var wg sync.WaitGroup
+			wg.Add(N)
+			for range N {
+				go func() {
+					Sleep(Nanosecond)
+					wg.Done()
+				}()
+			}
+			wg.Wait()
 		}
-		wg.Wait()
 	})
 }

12 changes: 6 additions & 6 deletions src/time/tick_test.go
@@ -227,29 +227,29 @@ func TestLongAdjustTimers(t *testing.T) {
 	}
 }
 func BenchmarkTicker(b *testing.B) {
-	benchmark(b, func(n int) {
+	benchmark(b, func(pb *testing.PB) {
 		ticker := NewTicker(Nanosecond)
-		for i := 0; i < n; i++ {
+		for pb.Next() {
 			<-ticker.C
 		}
 		ticker.Stop()
 	})
 }
 
 func BenchmarkTickerReset(b *testing.B) {
-	benchmark(b, func(n int) {
+	benchmark(b, func(pb *testing.PB) {
 		ticker := NewTicker(Nanosecond)
-		for i := 0; i < n; i++ {
+		for pb.Next() {
 			ticker.Reset(Nanosecond * 2)
 		}
 		ticker.Stop()
 	})
 }
 
 func BenchmarkTickerResetNaive(b *testing.B) {
-	benchmark(b, func(n int) {
+	benchmark(b, func(pb *testing.PB) {
 		ticker := NewTicker(Nanosecond)
-		for i := 0; i < n; i++ {
+		for pb.Next() {
 			ticker.Stop()
 			ticker = NewTicker(Nanosecond * 2)
 		}
