refactor(GODT-1728): Record multiple durations and decouple extra data
These changes are required for the store benchmarks.

Multiple durations are required to capture each individual store
request's execution time. We now also produce a benchmark report that
includes statistics for each benchmark run rather than just the total.

This patch also decouples the extra benchmark info that each benchmark
type may produce and that could be worth including in a report.
LBeernaertProton committed Aug 10, 2022
1 parent e70ceb7 commit 13db986
Showing 6 changed files with 74 additions and 42 deletions.
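
As a rough illustration of the flow described in the commit message (each run now keeps its individual request durations, gets its own statistics, and can carry a benchmark-specific payload behind a small interface), here is a minimal standalone sketch. The helpers below (benchmarkExtra, storeExtra, summarize) are simplified stand-ins invented for this illustration, not the reporter API changed in this commit.

package main

import (
	"fmt"
	"time"
)

// benchmarkExtra mirrors the BenchmarkExtra interface added in reporter.go:
// any benchmark-specific payload only has to know how to print itself.
type benchmarkExtra interface {
	String() string
}

// storeExtra is a hypothetical payload a store benchmark could attach;
// it is not part of this commit and only illustrates the decoupling.
type storeExtra struct {
	bytesWritten int64
}

func (s *storeExtra) String() string {
	return fmt.Sprintf("BytesWritten:%d", s.bytesWritten)
}

// summarize stands in for reporter.NewBenchmarkStatistics: per-run statistics
// are computed over that run's individual request durations.
func summarize(extra benchmarkExtra, durations ...time.Duration) string {
	var total time.Duration
	for _, d := range durations {
		total += d
	}

	out := fmt.Sprintf("SampleCount:%04d Total:%v", len(durations), total)
	if extra != nil {
		out += " Extra: " + extra.String()
	}

	return out
}

func main() {
	// One run of a store benchmark: every request's execution time is
	// recorded individually instead of a single duration for the run.
	requestTimes := []time.Duration{
		120 * time.Millisecond,
		95 * time.Millisecond,
		140 * time.Millisecond,
	}

	fmt.Println(summarize(&storeExtra{bytesWritten: 1 << 20}, requestTimes...))
}

The real code attaches the extra payload when building reporter.BenchmarkStatistics, as the diffs below show.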
12 changes: 6 additions & 6 deletions benchmarks/gluon_bench/benchmark/run.go
@@ -76,18 +76,18 @@ func RunMain() {

numRuns := *flags.BenchmarkRuns

var benchmarkRuns = make([]*reporter.BenchmarkRun, 0, numRuns)
var benchmarkStats = make([]*reporter.BenchmarkStatistics, 0, numRuns)

for r := uint(0); r < numRuns; r++ {
if *flags.Verbose {
fmt.Printf("IMAPBenchmark Run: %v\n", r)
}

benchRun := measureBenchmark(benchDirConfig, r, v)
benchmarkRuns = append(benchmarkRuns, benchRun)
benchStat := measureBenchmark(benchDirConfig, r, v)
benchmarkStats = append(benchmarkStats, benchStat)
}

benchmarkReports = append(benchmarkReports, reporter.NewBenchmarkReport(v.Name(), benchmarkRuns...))
benchmarkReports = append(benchmarkReports, reporter.NewBenchmarkReport(v.Name(), benchmarkStats...))

if *flags.Verbose {
fmt.Printf("End IMAPBenchmark: %v\n", v.Name())
@@ -109,7 +109,7 @@ func RunMain() {
}
}

func measureBenchmark(dirConfig BenchDirConfig, iteration uint, bench Benchmark) *reporter.BenchmarkRun {
func measureBenchmark(dirConfig BenchDirConfig, iteration uint, bench Benchmark) *reporter.BenchmarkStatistics {
benchPath, err := dirConfig.Get()

if err != nil {
@@ -140,5 +140,5 @@ func measureBenchmark(dirConfig BenchDirConfig, iteration uint, bench Benchmark)
panic(fmt.Sprintf("Failed to teardown benchmark %v: %v", bench.Name(), err))
}

return benchRun
return reporter.NewBenchmarkStatistics(benchRun.Extra, benchRun.Durations...)
}
5 changes: 1 addition & 4 deletions benchmarks/gluon_bench/gluon_benchmarks/sync.go
@@ -12,7 +12,6 @@ import (
"github.com/ProtonMail/gluon/benchmarks/gluon_bench/reporter"
"github.com/ProtonMail/gluon/benchmarks/gluon_bench/utils"
"github.com/ProtonMail/gluon/imap"
"github.com/ProtonMail/gluon/profiling"
"github.com/google/uuid"
_ "github.com/mattn/go-sqlite3"
"github.com/sirupsen/logrus"
@@ -123,9 +122,7 @@ func (s *Sync) Run(ctx context.Context) (*reporter.BenchmarkRun, error) {
return nil, err
}

var cmdTimings [profiling.CmdTypeTotal][]time.Duration

return reporter.NewBenchmarkRun(timer.Elapsed(), cmdTimings), nil
return reporter.NewBenchmarkRunSingle(timer.Elapsed(), nil), nil
}

func (s *Sync) TearDown(ctx context.Context) error {
32 changes: 32 additions & 0 deletions benchmarks/gluon_bench/imap_benchmarks/imap_benchmark.go
@@ -2,7 +2,12 @@ package imap_benchmarks

import (
"context"
"fmt"
"github.com/ProtonMail/gluon/benchmarks/gluon_bench/reporter"
"github.com/ProtonMail/gluon/profiling"
"net"
"strings"
"time"
)

// IMAPBenchmark is intended to be used to build benchmarks which bench IMAP commands on a given server.
@@ -19,3 +24,30 @@ type IMAPBenchmark interface {
// TearDown clear the benchmark state, this is not timed.
TearDown(ctx context.Context, addr net.Addr) error
}

type IMAPBenchmarkExtra struct {
CMDStatistic [profiling.CmdTypeTotal]*reporter.BenchmarkStatistics
}

func (i *IMAPBenchmarkExtra) String() string {
builder := strings.Builder{}

for n, v := range i.CMDStatistic {
if v.SampleCount == 0 {
continue
}

builder.WriteString(fmt.Sprintf("[%v] %v\n", profiling.CmdTypeToString(n), v.String()))
}

return builder.String()
}

func NewIMAPBenchmarkRun(duration time.Duration, cmdTimings [profiling.CmdTypeTotal][]time.Duration) *reporter.BenchmarkRun {
var cmdStatistic [profiling.CmdTypeTotal]*reporter.BenchmarkStatistics
for i, v := range cmdTimings {
cmdStatistic[i] = reporter.NewBenchmarkStatistics(nil, v...)
}

return reporter.NewBenchmarkRunSingle(duration, &IMAPBenchmarkExtra{CMDStatistic: cmdStatistic})
}
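
To show how the per-command timings become per-command statistics in the helper above, here is a small standalone sketch that mirrors NewIMAPBenchmarkRun and IMAPBenchmarkExtra.String. The command names, timing values, and the cmdStats type are invented stand-ins, not the profiling or reporter packages.

package main

import (
	"fmt"
	"strings"
	"time"
)

// cmdStats is a simplified stand-in for *reporter.BenchmarkStatistics.
type cmdStats struct {
	sampleCount int
	total       time.Duration
}

func newCmdStats(durations []time.Duration) cmdStats {
	s := cmdStats{sampleCount: len(durations)}
	for _, d := range durations {
		s.total += d
	}
	return s
}

func main() {
	// Hypothetical per-command timings, indexed like cmdTimings in
	// NewIMAPBenchmarkRun; most command slots can stay empty.
	cmdNames := []string{"FETCH", "STORE", "NOOP"}
	cmdTimings := [][]time.Duration{
		{10 * time.Millisecond, 12 * time.Millisecond}, // FETCH
		{30 * time.Millisecond},                        // STORE
		nil,                                            // NOOP: never issued
	}

	builder := strings.Builder{}

	for n, timings := range cmdTimings {
		stats := newCmdStats(timings)
		if stats.sampleCount == 0 {
			// Skip commands that were never run, as IMAPBenchmarkExtra.String does.
			continue
		}

		builder.WriteString(fmt.Sprintf("[%v] SampleCount:%04d Total:%v\n",
			cmdNames[n], stats.sampleCount, stats.total))
	}

	fmt.Print(builder.String())
}
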
@@ -54,7 +54,7 @@ func (i *IMAPBenchmarkRunner) Run(ctx context.Context) (*reporter.BenchmarkRun,
return nil, err
}

return reporter.NewBenchmarkRun(scopedTimer.Elapsed(), i.cmdProfilerBuilder.Merge()), nil
return NewIMAPBenchmarkRun(scopedTimer.Elapsed(), i.cmdProfilerBuilder.Merge()), nil
}

// TearDown clear the benchmark state, this is not timed.
53 changes: 33 additions & 20 deletions benchmarks/gluon_bench/reporter/reporter.go
@@ -4,11 +4,10 @@ import (
"fmt"
"math"
"sort"
"strings"
"time"

"github.com/bradenaw/juniper/xslices"

"github.com/ProtonMail/gluon/profiling"
)

type BenchmarkStatistics struct {
@@ -21,22 +20,33 @@ type BenchmarkStatistics struct {
Percentile10 time.Duration
RMS time.Duration
SampleCount int
Extra BenchmarkExtra
}

func (b *BenchmarkStatistics) String() string {
return fmt.Sprintf("SampleCount:%04d Total:%v Fastest:%v Slowest:%v Average:%v Median:%v 90thPercentile:%v 10thPercentile:%v RMS:%v",
builder := strings.Builder{}
builder.WriteString(fmt.Sprintf("SampleCount:%04d Total:%v Fastest:%v Slowest:%v Average:%v Median:%v 90thPercentile:%v 10thPercentile:%v RMS:%v",
b.SampleCount, b.Total, b.Fastest, b.Slowest, b.Average,
b.Median, b.Percentile90, b.Percentile10, b.RMS,
)
))

if b.Extra != nil {
builder.WriteString(" Extra:\n")
builder.WriteString(b.Extra.String())
}

return builder.String()
}

func NewBenchmarkStatistics(durations ...time.Duration) BenchmarkStatistics {
func NewBenchmarkStatistics(extra BenchmarkExtra, durations ...time.Duration) *BenchmarkStatistics {
sortedDurations := durations
sort.Slice(sortedDurations, func(i1, i2 int) bool {
return sortedDurations[i1] < sortedDurations[i2]
})

statistics := BenchmarkStatistics{}
statistics := &BenchmarkStatistics{
Extra: extra,
}
statistics.SampleCount = len(sortedDurations)

if statistics.SampleCount == 1 {
@@ -75,32 +85,35 @@ func NewBenchmarkStatistics(durations ...time.Duration) BenchmarkStatistics {
return statistics
}

type BenchmarkExtra interface {
String() string
}

type BenchmarkRun struct {
Duration time.Duration
CmdStatistics [profiling.CmdTypeTotal]BenchmarkStatistics
Durations []time.Duration
Extra BenchmarkExtra
}

func NewBenchmarkRun(duration time.Duration, cmdTimings [profiling.CmdTypeTotal][]time.Duration) *BenchmarkRun {
var cmdStatistic [profiling.CmdTypeTotal]BenchmarkStatistics
for i, v := range cmdTimings {
cmdStatistic[i] = NewBenchmarkStatistics(v...)
}
func NewBenchmarkRunSingle(duration time.Duration, extra BenchmarkExtra) *BenchmarkRun {
return &BenchmarkRun{Durations: []time.Duration{duration}, Extra: extra}
}

return &BenchmarkRun{Duration: duration, CmdStatistics: cmdStatistic}
func NewBenchmarkRun(durations []time.Duration, extra BenchmarkExtra) *BenchmarkRun {
return &BenchmarkRun{Durations: durations, Extra: extra}
}

type BenchmarkReport struct {
Name string
Runs []*BenchmarkRun
Statistics BenchmarkStatistics
Runs []*BenchmarkStatistics
Statistics *BenchmarkStatistics
}

func NewBenchmarkReport(name string, runs ...*BenchmarkRun) *BenchmarkReport {
durations := xslices.Map(runs, func(r *BenchmarkRun) time.Duration {
return r.Duration
func NewBenchmarkReport(name string, runs ...*BenchmarkStatistics) *BenchmarkReport {
durations := xslices.Map(runs, func(r *BenchmarkStatistics) time.Duration {
return r.Total
})

return &BenchmarkReport{Name: name, Runs: runs, Statistics: NewBenchmarkStatistics(durations...)}
return &BenchmarkReport{Name: name, Runs: runs, Statistics: NewBenchmarkStatistics(nil, durations...)}
}

// BenchmarkReporter is the interface that is required to be implemented by any report generation tool.
12 changes: 1 addition & 11 deletions benchmarks/gluon_bench/reporter/stdout_reporter.go
@@ -2,8 +2,6 @@ package reporter

import (
"fmt"

"github.com/ProtonMail/gluon/profiling"
)

// StdOutReporter prints the benchmark report to os.Stdout.
@@ -15,15 +13,7 @@ func (*StdOutReporter) ProduceReport(reports []*BenchmarkReport) error {
fmt.Printf("[%02d] %v\n", i, v.Statistics.String())

for r, v := range v.Runs {
fmt.Printf("[%02d] Run %02d - Time: %v\n", i, r, v.Duration)

for n, v := range v.CmdStatistics {
if v.SampleCount == 0 {
continue
}

fmt.Printf("[%02d] [%02d] [%v] %v\n", i, r, profiling.CmdTypeToString(n), v.String())
}
fmt.Printf("[%02d] Run %02d - %v\n", i, r, v.String())
}
}
