diff --git a/cmd/benchmark/benchmark.go b/cmd/benchmark/benchmark.go index e5f4f312e..9d7b2daaf 100644 --- a/cmd/benchmark/benchmark.go +++ b/cmd/benchmark/benchmark.go @@ -3,14 +3,62 @@ package main import ( "fmt" "os" + "strings" "github.com/urfave/cli/v2" "github.com/kakao/varlog/internal/benchmark" - "github.com/kakao/varlog/internal/flags" "github.com/kakao/varlog/pkg/types" ) +var ( + flagClusterID = &cli.StringFlag{ + Name: "cluster", + Usage: "Cluster ID", + Value: benchmark.DefaultClusterID.String(), + } + flagTarget = &cli.StringSliceFlag{ + Name: "target", + Required: true, + Usage: "The target of the benchmark load formatted by \"topic1:logstream1,topic2:logstream2,...\"", + } + flagMRAddrs = &cli.StringSliceFlag{ + Name: "address", + Required: true, + } + flagMsgSize = &cli.UintSliceFlag{ + Name: "message-size", + Aliases: []string{"msg-size"}, + Value: cli.NewUintSlice(benchmark.DefaultMessageSize), + Usage: "Message sizes for each load target", + } + flagBatchSize = &cli.UintSliceFlag{ + Name: "batch-size", + Value: cli.NewUintSlice(benchmark.DefaultBatchSize), + Usage: "Batch sizes for each load target", + } + flagAppenders = &cli.UintSliceFlag{ + Name: "appenders", + Aliases: []string{"appenders-count"}, + Value: cli.NewUintSlice(benchmark.DefaultConcurrency), + Usage: "The number of appenders for each load target", + } + flagSubscribers = &cli.UintSliceFlag{ + Name: "subscribers", + Aliases: []string{"subscribers-count"}, + Value: cli.NewUintSlice(benchmark.DefaultConcurrency), + Usage: "The number of subscribers for each load target", + } + flagDuration = &cli.DurationFlag{ + Name: "duration", + Value: benchmark.DefaultDuration, + } + flagReportInterval = &cli.DurationFlag{ + Name: "report-interval", + Value: benchmark.DefaultReportInterval, + } +) + func main() { os.Exit(run()) } @@ -28,33 +76,21 @@ func newApp() *cli.App { app := &cli.App{ Name: "benchmark", Flags: []cli.Flag{ - flagClusterID.StringFlag(false, benchmark.DefaultClusterID.String()), - flagTopicID.StringFlag(true, ""), - flagLogStreamID.StringFlag(true, ""), - flagMRAddrs.StringSliceFlag(true, nil), - flagMsgSize.IntFlag(true, 0), - flagBatchSize.IntFlag(false, benchmark.DefaultBatchSize), - flagConcurrency.IntFlag(false, benchmark.DefaultConcurrency), - flagDuration.DurationFlag(false, benchmark.DefaultDuration), - flagReportInterval.DurationFlag(false, benchmark.DefaultReportInterval), + flagClusterID, + flagTarget, + flagMRAddrs, + flagMsgSize, + flagBatchSize, + flagAppenders, + flagSubscribers, + flagDuration, + flagReportInterval, }, Action: start, } return app } -var ( - flagClusterID = flags.ClusterID() - flagTopicID = flags.TopicID() - flagLogStreamID = flags.LogStreamID() - flagMRAddrs = flags.FlagDesc{Name: "address"} - flagMsgSize = flags.FlagDesc{Name: "msg-size"} - flagBatchSize = flags.FlagDesc{Name: "batch-size"} - flagConcurrency = flags.FlagDesc{Name: "concurrency"} - flagDuration = flags.FlagDesc{Name: "duration"} - flagReportInterval = flags.FlagDesc{Name: "report-interval"} -) - func start(c *cli.Context) error { if c.NArg() > 0 { return fmt.Errorf("unexpected args: %v", c.Args().Slice()) @@ -65,44 +101,70 @@ func start(c *cli.Context) error { return err } - topicID, err := types.ParseTopicID(c.String(flagTopicID.Name)) - if err != nil { - return err + targets := make([]benchmark.Target, len(c.StringSlice(flagTarget.Name))) + for idx, str := range c.StringSlice(flagTarget.Name) { + toks := strings.Split(str, ":") + if len(toks) != 2 { + return fmt.Errorf("malformed target %s", str) + 
} + var target benchmark.Target + target.TopicID, err = types.ParseTopicID(toks[0]) + if err != nil { + return fmt.Errorf("malformed target %s: invalid topic %s", str, toks[0]) + } + if toks[1] != "*" { + target.LogStreamID, err = types.ParseLogStreamID(toks[1]) + if err != nil { + return fmt.Errorf("malformed target %s: invalid log stream %s", str, toks[1]) + } + } + targets[idx] = target } - logStreamID, err := types.ParseLogStreamID(c.String(flagLogStreamID.Name)) - if err != nil { - return err - } + targets = setSizes(targets, c.UintSlice(flagMsgSize.Name), func(idx int, size uint) { + targets[idx].MessageSize = size + }) - msgSize := c.Int(flagMsgSize.Name) - if msgSize < 0 { - return fmt.Errorf("stress: invalid msg size %d", msgSize) - } + targets = setSizes(targets, c.UintSlice(flagBatchSize.Name), func(idx int, size uint) { + targets[idx].BatchSize = size + }) - batchSize := c.Int(flagBatchSize.Name) - if batchSize <= 0 { - return fmt.Errorf("stress: invalid batch size %d", batchSize) - } - - concurrency := c.Int(flagConcurrency.Name) - if concurrency < 1 { - return fmt.Errorf("stress: invalid concurrency %d", concurrency) - } + targets = setSizes(targets, c.UintSlice(flagAppenders.Name), func(idx int, size uint) { + targets[idx].AppendersCount = size + }) + targets = setSizes(targets, c.UintSlice(flagSubscribers.Name), func(idx int, size uint) { + targets[idx].SubscribersCount = size + }) duration := c.Duration(flagDuration.Name) reportInterval := c.Duration(flagReportInterval.Name) - return benchmark.Append( + bm, err := benchmark.New( benchmark.WithClusterID(clusterID), - benchmark.WithTopicID(topicID), - benchmark.WithLogStreamID(logStreamID), + benchmark.WithTargets(targets...), benchmark.WithMetadataRepository(c.StringSlice(flagMRAddrs.Name)), - benchmark.WithMessageSize(msgSize), - benchmark.WithBatchSize(batchSize), - benchmark.WithConcurrency(concurrency), benchmark.WithDuration(duration), benchmark.WithReportInterval(reportInterval), ) + if err != nil { + return err + } + defer func() { + _ = bm.Close() + }() + return bm.Run() +} + +func setSizes(targets []benchmark.Target, sizes []uint, setter func(idx int, size uint)) []benchmark.Target { + for idx := range targets { + var size uint + if idx < len(sizes) { + size = sizes[idx] + } else { + size = sizes[len(sizes)-1] + } + setter(idx, size) + } + return targets } diff --git a/go.mod b/go.mod index c39d28c9d..a802f9264 100644 --- a/go.mod +++ b/go.mod @@ -32,6 +32,7 @@ require ( go.uber.org/goleak v1.2.0 go.uber.org/multierr v1.8.0 go.uber.org/zap v1.23.0 + golang.org/x/exp v0.0.0-20221114191408-850992195362 golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 golang.org/x/sync v0.1.0 golang.org/x/sys v0.2.0 @@ -104,7 +105,6 @@ require ( go.opentelemetry.io/otel/trace v1.11.1 // indirect go.opentelemetry.io/proto/otlp v0.11.0 // indirect go.uber.org/atomic v1.7.0 // indirect - golang.org/x/exp v0.0.0-20200513190911-00229845015e // indirect golang.org/x/mod v0.7.0 // indirect golang.org/x/net v0.2.0 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect diff --git a/go.sum b/go.sum index ea8298529..3094943f2 100644 --- a/go.sum +++ b/go.sum @@ -568,8 +568,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp 
v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200513190911-00229845015e h1:rMqLP+9XLy+LdbCXHjJHAmTfXCr93W7oruWA6Hq1Alc= -golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= +golang.org/x/exp v0.0.0-20221114191408-850992195362 h1:NoHlPRbyl1VFI6FjwHtPQCN7wAMXI6cKcqrmXhOOfBQ= +golang.org/x/exp v0.0.0-20221114191408-850992195362/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/internal/benchmark/benchmark.go b/internal/benchmark/benchmark.go index 8df2aaabc..6669fd3c9 100644 --- a/internal/benchmark/benchmark.go +++ b/internal/benchmark/benchmark.go @@ -3,147 +3,128 @@ package benchmark import ( "context" "fmt" - "io" "os" + "os/signal" "sync" - "sync/atomic" + "syscall" "time" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/kakao/varlog/pkg/varlog" + "go.uber.org/multierr" + "golang.org/x/exp/slog" + "golang.org/x/sync/errgroup" ) -type appendStat struct { - success int64 - failure int64 - totalBytes int64 - startTime time.Time - durations int64 +type Benchmark struct { + config + loaders []*Loader + metrics Metrics } -func newAppendStat() *appendStat { - return &appendStat{ - startTime: time.Now(), +// New creates a new Benchmark and returns it. Users must call Close to release resources if it returns successfully. +func New(opts ...Option) (bm *Benchmark, err error) { + cfg, err := newConfig(opts) + if err != nil { + return nil, err } -} - -func (as *appendStat) addSuccess(success int64) { - atomic.AddInt64(&as.success, success) -} -func (as *appendStat) addFailure(failure int64) { - atomic.AddInt64(&as.failure, failure) -} + logger := slog.New(slog.HandlerOptions{ + Level: slog.InfoLevel.Level(), + }.NewTextHandler(os.Stdout)) + slog.SetDefault(logger) -func (as *appendStat) elapsedTime() float64 { - return time.Since(as.startTime).Seconds() -} - -func (as *appendStat) addResponseTime(responseTime time.Duration) { - atomic.AddInt64(&as.durations, responseTime.Milliseconds()) -} - -func (as *appendStat) throughput() float64 { - duration := as.elapsedTime() - if duration == 0 { - return 0 + bm = &Benchmark{ + config: cfg, } - return float64(as.totalBytes) / duration / 1024.0 / 1024.0 -} -func (as *appendStat) responseTime() float64 { - return float64(as.durations) / float64(as.success) -} - -func (as *appendStat) addBytes(byteSize int64) { - as.totalBytes += byteSize -} + defer func() { + if err != nil { + _ = bm.Close() + } + }() -func (as *appendStat) printHeader(w io.Writer) { - fmt.Fprintln(w, "success\tfailure\tMB\telapsed_time(s)\tthroughput(MB/s)\tresponse_time(ms)") -} + for idx, target := range bm.targets { + var loader *Loader + loaderMetrics := &LoaderMetrics{idx: idx} + loader, err = NewLoader(loaderConfig{ + Target: target, + cid: bm.cid, + mraddrs: bm.mraddrs, + metrics: loaderMetrics, + }) + if err != nil { + return bm, err + } + bm.loaders = append(bm.loaders, loader) + bm.metrics.loaderMetrics = append(bm.metrics.loaderMetrics, loaderMetrics) + } -func (as *appendStat) printStat(w io.Writer) { - fmt.Fprintf(w, "%d\t%d\t%f\t%f\t%f\t%f\n", as.success, as.failure, 
float64(as.totalBytes)/float64(1<<20), as.elapsedTime(), as.throughput(), as.responseTime()) + return bm, nil } -func Append(opts ...Option) error { - cfg, err := newConfig(opts) - if err != nil { - return err - } +// Run starts Loaders and metric reporter. It blocks until the loaders are finished. +func (bm *Benchmark) Run() error { + ctx, cancel := context.WithCancel(context.Background()) + g, ctx := errgroup.WithContext(ctx) - batch := make([][]byte, cfg.batchSize) - for i := 0; i < cfg.batchSize; i++ { - msg := make([]byte, cfg.msgSize) - for j := 0; j < cfg.msgSize; j++ { - msg[j] = '.' - } - batch[i] = msg - } - payloadBytes := int64(cfg.msgSize * cfg.batchSize) + benchmarkTimer := time.NewTimer(bm.duration) - newAppendStat().printHeader(os.Stderr) + sigC := make(chan os.Signal, 1) + signal.Notify(sigC, os.Interrupt, syscall.SIGTERM) - ctx, cancel := context.WithTimeout(context.Background(), cfg.duration) - defer cancel() + reportTick := time.NewTicker(bm.reportInterval) + defer reportTick.Stop() - var reportWg sync.WaitGroup - stat := newAppendStat() - reportWg.Add(1) + var finished bool + var wg sync.WaitGroup + wg.Add(1) go func() { - timer := time.NewTimer(cfg.reportInterval) defer func() { - timer.Stop() - reportWg.Done() + cancel() + wg.Done() + fmt.Println(bm.metrics.String()) }() - for { select { + case <-benchmarkTimer.C: + finished = true + slog.Debug("benchmark is finished") + return + case sig := <-sigC: + finished = true + slog.Debug("caught signal", slog.String("signal", sig.String())) + return case <-ctx.Done(): + slog.Debug("loader failed") return - case <-timer.C: - stat.printStat(os.Stderr) - timer.Reset(cfg.reportInterval) + case <-reportTick.C: + s := bm.metrics.String() + fmt.Println(s) } } + }() - var wg sync.WaitGroup - wg.Add(cfg.concurrency) - for i := 0; i < cfg.concurrency; i++ { - go func() { - defer wg.Done() - vlog, err := varlog.Open(ctx, cfg.cid, cfg.mraddrs, varlog.WithGRPCDialOptions( - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithReadBufferSize(1<<20), - grpc.WithWriteBufferSize(32<<20), - )) - if err != nil { - panic(err) - } - defer func() { - _ = vlog.Close() - }() - for ctx.Err() == nil { - before := time.Now() - res := vlog.AppendTo(ctx, cfg.tpid, cfg.lsid, batch) - if res.Err != nil { - stat.addFailure(1) - continue - } - stat.addResponseTime(time.Since(before)) - stat.addSuccess(1) - stat.addBytes(payloadBytes) - } - }() + for _, tw := range bm.loaders { + tw := tw + g.Go(func() error { + return tw.Run(ctx) + }) } + err := g.Wait() wg.Wait() - cancel() - reportWg.Wait() - stat.printStat(os.Stderr) - return nil + slog.Debug("stopped benchmark") + if finished { + return nil + } + return err +} + +// Close releases resources acquired by Benchmark and Loader. 
+func (bm *Benchmark) Close() error { + var err error + for _, loader := range bm.loaders { + err = multierr.Append(err, loader.Close()) + } + return err } diff --git a/internal/benchmark/config.go b/internal/benchmark/config.go index 0c14615c8..be6922c11 100644 --- a/internal/benchmark/config.go +++ b/internal/benchmark/config.go @@ -2,7 +2,6 @@ package benchmark import ( "errors" - "fmt" "time" "github.com/kakao/varlog/pkg/types" @@ -10,20 +9,17 @@ import ( const ( DefaultClusterID = types.ClusterID(1) + DefaultMessageSize = 0 DefaultBatchSize = 1 - DefaultConcurrency = 1 + DefaultConcurrency = 0 DefaultDuration = 1 * time.Minute - DefaultReportInterval = 5 * time.Second + DefaultReportInterval = 3 * time.Second ) type config struct { cid types.ClusterID - tpid types.TopicID - lsid types.LogStreamID + targets []Target mraddrs []string - msgSize int - batchSize int - concurrency int duration time.Duration reportInterval time.Duration } @@ -31,8 +27,6 @@ type config struct { func newConfig(opts []Option) (config, error) { cfg := config{ cid: DefaultClusterID, - batchSize: DefaultBatchSize, - concurrency: DefaultConcurrency, duration: DefaultDuration, reportInterval: DefaultReportInterval, } @@ -46,14 +40,16 @@ func newConfig(opts []Option) (config, error) { } func (cfg *config) validate() error { - if cfg.tpid.Invalid() { - return fmt.Errorf("invalid topic %v", cfg.tpid) - } if len(cfg.mraddrs) == 0 { return errors.New("no metadata repository address") } - if cfg.batchSize < 1 { - return fmt.Errorf("non-positive batch size %d", cfg.batchSize) + if len(cfg.targets) == 0 { + return errors.New("no load target") + } + for _, target := range cfg.targets { + if err := target.Valid(); err != nil { + return err + } } return nil } @@ -80,15 +76,9 @@ func WithClusterID(cid types.ClusterID) Option { }) } -func WithTopicID(tpid types.TopicID) Option { +func WithTargets(targets ...Target) Option { return newFuncOption(func(cfg *config) { - cfg.tpid = tpid - }) -} - -func WithLogStreamID(lsid types.LogStreamID) Option { - return newFuncOption(func(cfg *config) { - cfg.lsid = lsid + cfg.targets = targets }) } @@ -98,23 +88,6 @@ func WithMetadataRepository(addrs []string) Option { }) } -func WithMessageSize(msgSize int) Option { - return newFuncOption(func(cfg *config) { - cfg.msgSize = msgSize - }) -} - -func WithBatchSize(batchSize int) Option { - return newFuncOption(func(cfg *config) { - cfg.batchSize = batchSize - }) -} -func WithConcurrency(concurrency int) Option { - return newFuncOption(func(cfg *config) { - cfg.concurrency = concurrency - }) -} - func WithDuration(duration time.Duration) Option { return newFuncOption(func(cfg *config) { cfg.duration = duration diff --git a/internal/benchmark/loader.go b/internal/benchmark/loader.go new file mode 100644 index 000000000..b086a06ec --- /dev/null +++ b/internal/benchmark/loader.go @@ -0,0 +1,245 @@ +package benchmark + +import ( + "context" + "fmt" + "time" + + "go.uber.org/multierr" + "go.uber.org/zap" + "golang.org/x/exp/slog" + "golang.org/x/sync/errgroup" + + "github.com/kakao/varlog/pkg/types" + "github.com/kakao/varlog/pkg/varlog" + "github.com/kakao/varlog/proto/varlogpb" +) + +type loaderConfig struct { + Target + cid types.ClusterID + mraddrs []string + metrics *LoaderMetrics +} + +type Loader struct { + loaderConfig + batch [][]byte + apps []varlog.Log + subs []varlog.Log + begin struct { + lsn varlogpb.LogSequenceNumber + ch chan varlogpb.LogSequenceNumber + } + logger *slog.Logger +} + +func NewLoader(cfg loaderConfig) (loader *Loader, 
err error) { + loader = &Loader{ + loaderConfig: cfg, + } + loader.begin.ch = make(chan varlogpb.LogSequenceNumber, cfg.AppendersCount) + loader.logger = slog.With(slog.Any("topic", loader.TopicID)) + + defer func() { + if err != nil { + _ = loader.Close() + } + }() + + var c varlog.Log + for i := uint(0); i < loader.AppendersCount; i++ { + c, err = varlog.Open(context.TODO(), loader.cid, loader.mraddrs) + if err != nil { + return loader, err + } + loader.apps = append(loader.apps, c) + } + + for i := uint(0); i < loader.SubscribersCount; i++ { + c, err = varlog.Open(context.TODO(), loader.cid, loader.mraddrs) + if err != nil { + return loader, err + } + loader.subs = append(loader.subs, c) + } + + msg := NewMessage(loader.MessageSize) + loader.batch = make([][]byte, loader.BatchSize) + for i := range loader.batch { + loader.batch[i] = msg + } + + loader.logger.Debug("created topic worker") + return loader, nil +} + +// Run starts goroutines that append log entries and subscribe to them. +func (loader *Loader) Run(ctx context.Context) (err error) { + loader.metrics.Reset(time.Now()) + + ctx, cancel := context.WithCancel(ctx) + g, ctx := errgroup.WithContext(ctx) + + defer func() { + err = multierr.Append(err, g.Wait()) + cancel() + }() + + for i := 0; i < len(loader.apps); i++ { + c := loader.apps[i] + g.Go(func() error { + return loader.appendLoop(ctx, c) + }) + } + + err = loader.setBeginLSN(ctx) + if err != nil { + err = fmt.Errorf("begin lsn: %w", err) + return err + } + + for i := 0; i < len(loader.subs); i++ { + c := loader.subs[i] + g.Go(func() error { + return loader.subscribeLoop(ctx, c) + }) + } + + return err +} + +func (loader *Loader) Close() error { + var err error + for _, c := range loader.apps { + err = multierr.Append(err, c.Close()) + } + for _, c := range loader.subs { + err = multierr.Append(err, c.Close()) + } + return err +} + +func (loader *Loader) appendLoop(ctx context.Context, c varlog.Log) error { + begin := true + var am AppendMetrics + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + ts := time.Now() + var res varlog.AppendResult + if loader.LogStreamID.Invalid() { + res = c.Append(ctx, loader.TopicID, loader.batch) + } else { + res = c.AppendTo(ctx, loader.TopicID, loader.LogStreamID, loader.batch) + } + dur := time.Since(ts) + if res.Err != nil { + return fmt.Errorf("append: %w", res.Err) + } + if begin { + loader.begin.ch <- varlogpb.LogSequenceNumber{ + LLSN: res.Metadata[0].LLSN, + GLSN: res.Metadata[0].GLSN, + } + begin = false + } + cnt := len(res.Metadata) + am.bytes += int64(loader.BatchSize * loader.MessageSize) + am.requests++ + am.durationMS += dur.Milliseconds() + if loader.metrics.ReportAppendMetrics(am) { + am = AppendMetrics{} + } + + loader.logger.Debug("append", + slog.Int("count", cnt), + slog.Any("logstream", res.Metadata[0].LogStreamID), + slog.Any("firstGLSN", res.Metadata[0].GLSN), + slog.Any("lastGLSN", res.Metadata[cnt-1].GLSN), + slog.Any("firstLLSN", res.Metadata[0].LLSN), + slog.Any("lastLLSN", res.Metadata[cnt-1].LLSN), + ) + } +} + +func (loader *Loader) subscribeLoop(ctx context.Context, c varlog.Log) error { + var sm SubscribeMetrics + if loader.LogStreamID.Invalid() { + var subErr error + stop := make(chan struct{}) + closer, err := c.Subscribe(ctx, loader.TopicID, loader.begin.lsn.GLSN, types.MaxGLSN, func(logEntry varlogpb.LogEntry, err error) { + if err != nil { + subErr = err + close(stop) + return + } + loader.logger.Debug("subscribed", zap.String("log", logEntry.String())) + sm.logs++ + sm.bytes 
+= int64(len(logEntry.Data)) + if loader.metrics.ReportSubscribeMetrics(sm) { + sm = SubscribeMetrics{} + } + }) + if err != nil { + return err + } + defer closer() + + select { + case <-stop: + if subErr != nil { + return fmt.Errorf("subscribe: %w", subErr) + } + return nil + case <-ctx.Done(): + return ctx.Err() + } + } + + subscriber := c.SubscribeTo(ctx, loader.TopicID, loader.LogStreamID, loader.begin.lsn.LLSN, types.MaxLLSN) + defer func() { + _ = subscriber.Close() + }() + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + logEntry, err := subscriber.Next() + if err != nil { + return fmt.Errorf("subscribe: %w", err) + } + loader.logger.Debug("subscribeTo", slog.Any("llsn", logEntry.LLSN)) + sm.logs++ + sm.bytes += int64(len(logEntry.Data)) + if loader.metrics.ReportSubscribeMetrics(sm) { + sm = SubscribeMetrics{} + } + } +} + +func (loader *Loader) setBeginLSN(ctx context.Context) error { + beginLSN := varlogpb.LogSequenceNumber{ + LLSN: types.MaxLLSN, + GLSN: types.MaxGLSN, + } + for i := uint(0); i < loader.AppendersCount; i++ { + select { + case lsn := <-loader.begin.ch: + if lsn.GLSN < beginLSN.GLSN { + beginLSN = lsn + } + case <-ctx.Done(): + return ctx.Err() + } + } + loader.begin.lsn = beginLSN + loader.logger.Debug("begin lsn", slog.Any("lsn", beginLSN)) + return nil +} diff --git a/internal/benchmark/message.go b/internal/benchmark/message.go new file mode 100644 index 000000000..92c0e6090 --- /dev/null +++ b/internal/benchmark/message.go @@ -0,0 +1,20 @@ +package benchmark + +import ( + "math/rand" + "time" +) + +const charset = "abcdefghijklmnopqrstuvwxyz" + +func NewMessage(size uint) []byte { + if size == 0 { + return []byte{} + } + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + msg := make([]byte, size) + for i := range msg { + msg[i] = charset[rng.Intn(len(charset))] + } + return msg +} diff --git a/internal/benchmark/metrics.go b/internal/benchmark/metrics.go new file mode 100644 index 000000000..ef318663f --- /dev/null +++ b/internal/benchmark/metrics.go @@ -0,0 +1,106 @@ +package benchmark + +import ( + "fmt" + "strings" + "sync" + "time" +) + +type Metrics struct { + loaderMetrics []*LoaderMetrics +} + +func (m Metrics) String() string { + // __idx__: target index + // _arps__: appended requests per second + // _ambps_: appended megabytes per second + // _adurs_: mean/min/max append duration in milliseconds + // _slps__: subscribed logs per second + // _smbps_: subscribed megabytes per second + // _eelat_: end-to-end latency in milliseconds + + var sb strings.Builder + for _, tm := range m.loaderMetrics { + tm.Flush(&sb) + } + return sb.String() +} + +type LoaderMetrics struct { + idx int + + mu sync.Mutex + lastTime time.Time + appendMetrics AppendMetrics + subscribeMetrics SubscribeMetrics +} + +type AppendMetrics struct { + requests int64 + bytes int64 + durationMS int64 +} + +type SubscribeMetrics struct { + logs int64 + bytes int64 +} + +func (twm *LoaderMetrics) Reset(now time.Time) { + twm.appendMetrics = AppendMetrics{} + twm.subscribeMetrics = SubscribeMetrics{} + twm.lastTime = now +} + +func (twm *LoaderMetrics) ReportAppendMetrics(m AppendMetrics) bool { + if !twm.mu.TryLock() { + return false + } + defer twm.mu.Unlock() + twm.appendMetrics.requests += m.requests + twm.appendMetrics.bytes += m.bytes + twm.appendMetrics.durationMS += m.durationMS + return true +} + +func (twm *LoaderMetrics) ReportSubscribeMetrics(m SubscribeMetrics) bool { + if !twm.mu.TryLock() { + return false + } + defer twm.mu.Unlock() + 
twm.subscribeMetrics.logs += m.logs + twm.subscribeMetrics.bytes += m.bytes + return true +} + +func (twm *LoaderMetrics) Flush(sb *strings.Builder) { + const mib = 1 << 20 + + twm.mu.Lock() + defer twm.mu.Unlock() + + now := time.Now() + interval := now.Sub(twm.lastTime).Seconds() + + // _arps__: appended requests per second + // _ambps_: appended megabytes per second + // _adurs_: mean/min/max append duration in milliseconds + // _slps__: subscribed logs per second + // _smbps_: subscribed megabytes per second + // _eelat_: end-to-end latency in milliseconds + + fmt.Fprintf(sb, "idx=%2d\t", twm.idx) + + ap := &twm.appendMetrics + fmt.Fprintf(sb, "arps=%2.1f\t", float64(ap.requests)/interval) + fmt.Fprintf(sb, "abps=%7.1f\t", float64(ap.bytes)/interval/mib) + + sm := &twm.subscribeMetrics + fmt.Fprintf(sb, "slps=%2.1f\t", float64(sm.logs)/interval) + fmt.Fprintf(sb, "sbps=%7.1f", float64(sm.bytes)/interval/mib) + + twm.appendMetrics = AppendMetrics{} + twm.subscribeMetrics = SubscribeMetrics{} + twm.lastTime = now +} diff --git a/internal/benchmark/target.go b/internal/benchmark/target.go new file mode 100644 index 000000000..8d3f897fd --- /dev/null +++ b/internal/benchmark/target.go @@ -0,0 +1,30 @@ +package benchmark + +import ( + "errors" + "fmt" + + "github.com/kakao/varlog/pkg/types" +) + +type Target struct { + TopicID types.TopicID + LogStreamID types.LogStreamID + MessageSize uint + BatchSize uint + AppendersCount uint + SubscribersCount uint +} + +func (tgt *Target) Valid() error { + if tgt.TopicID.Invalid() { + return fmt.Errorf("invalid topic %v", tgt.TopicID) + } + if tgt.BatchSize < 1 { + return fmt.Errorf("batch size %d", tgt.BatchSize) + } + if tgt.AppendersCount == 0 && tgt.SubscribersCount == 0 { + return errors.New("no appenders and subscribers") + } + return nil +} diff --git a/vendor/golang.org/x/exp/AUTHORS b/vendor/golang.org/x/exp/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/exp/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/exp/CONTRIBUTORS b/vendor/golang.org/x/exp/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/exp/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go new file mode 100644 index 000000000..2c033dff4 --- /dev/null +++ b/vendor/golang.org/x/exp/constraints/constraints.go @@ -0,0 +1,50 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package constraints defines a set of useful constraints to be used +// with type parameters. +package constraints + +// Signed is a constraint that permits any signed integer type. +// If future releases of Go add new predeclared signed integer types, +// this constraint will be modified to include them. +type Signed interface { + ~int | ~int8 | ~int16 | ~int32 | ~int64 +} + +// Unsigned is a constraint that permits any unsigned integer type. 
+// If future releases of Go add new predeclared unsigned integer types, +// this constraint will be modified to include them. +type Unsigned interface { + ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr +} + +// Integer is a constraint that permits any integer type. +// If future releases of Go add new predeclared integer types, +// this constraint will be modified to include them. +type Integer interface { + Signed | Unsigned +} + +// Float is a constraint that permits any floating-point type. +// If future releases of Go add new predeclared floating-point types, +// this constraint will be modified to include them. +type Float interface { + ~float32 | ~float64 +} + +// Complex is a constraint that permits any complex numeric type. +// If future releases of Go add new predeclared complex numeric types, +// this constraint will be modified to include them. +type Complex interface { + ~complex64 | ~complex128 +} + +// Ordered is a constraint that permits any ordered type: any type +// that supports the operators < <= >= >. +// If future releases of Go add new ordered types, +// this constraint will be modified to include them. +type Ordered interface { + Integer | Float | ~string +} diff --git a/vendor/golang.org/x/exp/rand/exp.go b/vendor/golang.org/x/exp/rand/exp.go index 4bc110f91..083867276 100644 --- a/vendor/golang.org/x/exp/rand/exp.go +++ b/vendor/golang.org/x/exp/rand/exp.go @@ -26,8 +26,7 @@ const ( // To produce a distribution with a different rate parameter, // callers can adjust the output using: // -// sample = ExpFloat64() / desiredRateParameter -// +// sample = ExpFloat64() / desiredRateParameter func (r *Rand) ExpFloat64() float64 { for { j := r.Uint32() diff --git a/vendor/golang.org/x/exp/rand/normal.go b/vendor/golang.org/x/exp/rand/normal.go index ba4ea54ca..b66da3a81 100644 --- a/vendor/golang.org/x/exp/rand/normal.go +++ b/vendor/golang.org/x/exp/rand/normal.go @@ -33,8 +33,7 @@ func absInt32(i int32) uint32 { // To produce a different normal distribution, callers can // adjust the output using: // -// sample = NormFloat64() * desiredStdDev + desiredMean -// +// sample = NormFloat64() * desiredStdDev + desiredMean func (r *Rand) NormFloat64() float64 { for { j := int32(r.Uint32()) // Possibly negative diff --git a/vendor/golang.org/x/exp/rand/rand.go b/vendor/golang.org/x/exp/rand/rand.go index a369bc775..ee6161bc6 100644 --- a/vendor/golang.org/x/exp/rand/rand.go +++ b/vendor/golang.org/x/exp/rand/rand.go @@ -212,7 +212,8 @@ func (r *Rand) Shuffle(n int, swap func(i, j int)) { // Read generates len(p) random bytes and writes them into p. It // always returns len(p) and a nil error. -// Read should not be called concurrently with any other Rand method. +// Read should not be called concurrently with any other Rand method unless +// the underlying source is a LockedSource. func (r *Rand) Read(p []byte) (n int, err error) { if lk, ok := r.src.(*LockedSource); ok { return lk.Read(p, &r.readVal, &r.readPos) @@ -246,10 +247,10 @@ func read(p []byte, src Source, readVal *uint64, readPos *int8) (n int, err erro * Top-level convenience functions */ -var globalRand = New(&LockedSource{src: NewSource(1).(*PCGSource)}) +var globalRand = New(&LockedSource{src: *NewSource(1).(*PCGSource)}) -// Type assert that globalRand's source is a LockedSource whose src is a *rngSource. -var _ *PCGSource = globalRand.src.(*LockedSource).src +// Type assert that globalRand's source is a LockedSource whose src is a PCGSource. 
+var _ PCGSource = globalRand.src.(*LockedSource).src // Seed uses the provided seed value to initialize the default Source to a // deterministic state. If Seed is not called, the generator behaves as @@ -320,8 +321,7 @@ func Read(p []byte) (n int, err error) { return globalRand.Read(p) } // To produce a different normal distribution, callers can // adjust the output using: // -// sample = NormFloat64() * desiredStdDev + desiredMean -// +// sample = NormFloat64() * desiredStdDev + desiredMean func NormFloat64() float64 { return globalRand.NormFloat64() } // ExpFloat64 returns an exponentially distributed float64 in the range @@ -330,15 +330,16 @@ func NormFloat64() float64 { return globalRand.NormFloat64() } // To produce a distribution with a different rate parameter, // callers can adjust the output using: // -// sample = ExpFloat64() / desiredRateParameter -// +// sample = ExpFloat64() / desiredRateParameter func ExpFloat64() float64 { return globalRand.ExpFloat64() } // LockedSource is an implementation of Source that is concurrency-safe. -// It is just a standard Source with its operations protected by a sync.Mutex. +// A Rand using a LockedSource is safe for concurrent use. +// +// The zero value of LockedSource is valid, but should be seeded before use. type LockedSource struct { lk sync.Mutex - src *PCGSource + src PCGSource } func (s *LockedSource) Uint64() (n uint64) { @@ -365,7 +366,7 @@ func (s *LockedSource) seedPos(seed uint64, readPos *int8) { // Read implements Read for a LockedSource. func (s *LockedSource) Read(p []byte, readVal *uint64, readPos *int8) (n int, err error) { s.lk.Lock() - n, err = read(p, s.src, readVal, readPos) + n, err = read(p, &s.src, readVal, readPos) s.lk.Unlock() return } diff --git a/vendor/golang.org/x/exp/rand/rng.go b/vendor/golang.org/x/exp/rand/rng.go index 17cee105f..9b79108c7 100644 --- a/vendor/golang.org/x/exp/rand/rng.go +++ b/vendor/golang.org/x/exp/rand/rng.go @@ -13,10 +13,10 @@ import ( // PCGSource is an implementation of a 64-bit permuted congruential // generator as defined in // -// PCG: A Family of Simple Fast Space-Efficient Statistically Good -// Algorithms for Random Number Generation -// Melissa E. O’Neill, Harvey Mudd College -// http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf +// PCG: A Family of Simple Fast Space-Efficient Statistically Good +// Algorithms for Random Number Generation +// Melissa E. O’Neill, Harvey Mudd College +// http://www.pcg-random.org/pdf/toms-oneill-pcg-family-v1.02.pdf // // The generator here is the congruential generator PCG XSL RR 128/64 (LCG) // as found in the software available at http://www.pcg-random.org/. diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go new file mode 100644 index 000000000..6b7928ef7 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/slices.go @@ -0,0 +1,249 @@ +// Copyright 2021 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package slices defines various functions useful with slices of any type. +// Unless otherwise specified, these functions all apply to the elements +// of a slice at index 0 <= i < len(s). +// +// Note that the less function in IsSortedFunc, SortFunc, SortStableFunc requires a +// strict weak ordering (https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings), +// or the sorting may fail to sort correctly. 
A common case is when sorting slices of +// floating-point numbers containing NaN values. +package slices + +import "golang.org/x/exp/constraints" + +// Equal reports whether two slices are equal: the same length and all +// elements equal. If the lengths are different, Equal returns false. +// Otherwise, the elements are compared in increasing index order, and the +// comparison stops at the first unequal pair. +// Floating point NaNs are not considered equal. +func Equal[E comparable](s1, s2 []E) bool { + if len(s1) != len(s2) { + return false + } + for i := range s1 { + if s1[i] != s2[i] { + return false + } + } + return true +} + +// EqualFunc reports whether two slices are equal using a comparison +// function on each pair of elements. If the lengths are different, +// EqualFunc returns false. Otherwise, the elements are compared in +// increasing index order, and the comparison stops at the first index +// for which eq returns false. +func EqualFunc[E1, E2 any](s1 []E1, s2 []E2, eq func(E1, E2) bool) bool { + if len(s1) != len(s2) { + return false + } + for i, v1 := range s1 { + v2 := s2[i] + if !eq(v1, v2) { + return false + } + } + return true +} + +// Compare compares the elements of s1 and s2. +// The elements are compared sequentially, starting at index 0, +// until one element is not equal to the other. +// The result of comparing the first non-matching elements is returned. +// If both slices are equal until one of them ends, the shorter slice is +// considered less than the longer one. +// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. +// Comparisons involving floating point NaNs are ignored. +func Compare[E constraints.Ordered](s1, s2 []E) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + switch { + case v1 < v2: + return -1 + case v1 > v2: + return +1 + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// CompareFunc is like Compare but uses a comparison function +// on each pair of elements. The elements are compared in increasing +// index order, and the comparisons stop after the first time cmp +// returns non-zero. +// The result is the first non-zero result of cmp; if cmp always +// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), +// and +1 if len(s1) > len(s2). +func CompareFunc[E1, E2 any](s1 []E1, s2 []E2, cmp func(E1, E2) int) int { + s2len := len(s2) + for i, v1 := range s1 { + if i >= s2len { + return +1 + } + v2 := s2[i] + if c := cmp(v1, v2); c != 0 { + return c + } + } + if len(s1) < s2len { + return -1 + } + return 0 +} + +// Index returns the index of the first occurrence of v in s, +// or -1 if not present. +func Index[E comparable](s []E, v E) int { + for i, vs := range s { + if v == vs { + return i + } + } + return -1 +} + +// IndexFunc returns the first index i satisfying f(s[i]), +// or -1 if none do. +func IndexFunc[E any](s []E, f func(E) bool) int { + for i, v := range s { + if f(v) { + return i + } + } + return -1 +} + +// Contains reports whether v is present in s. +func Contains[E comparable](s []E, v E) bool { + return Index(s, v) >= 0 +} + +// Insert inserts the values v... into s at index i, +// returning the modified slice. +// In the returned slice r, r[i] == v[0]. +// Insert panics if i is out of range. +// This function is O(len(s) + len(v)). 
+func Insert[S ~[]E, E any](s S, i int, v ...E) S { + tot := len(s) + len(v) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[i:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[i:]) + return s2 +} + +// Delete removes the elements s[i:j] from s, returning the modified slice. +// Delete panics if s[i:j] is not a valid slice of s. +// Delete modifies the contents of the slice s; it does not create a new slice. +// Delete is O(len(s)-j), so if many items must be deleted, it is better to +// make a single call deleting them all together than to delete one at a time. +// Delete might not modify the elements s[len(s)-(j-i):len(s)]. If those +// elements contain pointers you might consider zeroing those elements so that +// objects they reference can be garbage collected. +func Delete[S ~[]E, E any](s S, i, j int) S { + _ = s[i:j] // bounds check + + return append(s[:i], s[j:]...) +} + +// Replace replaces the elements s[i:j] by the given v, and returns the +// modified slice. Replace panics if s[i:j] is not a valid slice of s. +func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { + _ = s[i:j] // verify that i:j is a valid subslice + tot := len(s[:i]) + len(v) + len(s[j:]) + if tot <= cap(s) { + s2 := s[:tot] + copy(s2[i+len(v):], s[j:]) + copy(s2[i:], v) + return s2 + } + s2 := make(S, tot) + copy(s2, s[:i]) + copy(s2[i:], v) + copy(s2[i+len(v):], s[j:]) + return s2 +} + +// Clone returns a copy of the slice. +// The elements are copied using assignment, so this is a shallow clone. +func Clone[S ~[]E, E any](s S) S { + // Preserve nil in case it matters. + if s == nil { + return nil + } + return append(S([]E{}), s...) +} + +// Compact replaces consecutive runs of equal elements with a single copy. +// This is like the uniq command found on Unix. +// Compact modifies the contents of the slice s; it does not create a new slice. +func Compact[S ~[]E, E comparable](s S) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if v != last { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// CompactFunc is like Compact but uses a comparison function. +func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { + if len(s) < 2 { + return s + } + i := 1 + last := s[0] + for _, v := range s[1:] { + if !eq(v, last) { + s[i] = v + i++ + last = v + } + } + return s[:i] +} + +// Grow increases the slice's capacity, if necessary, to guarantee space for +// another n elements. After Grow(n), at least n elements can be appended +// to the slice without another allocation. If n is negative or too large to +// allocate the memory, Grow panics. +func Grow[S ~[]E, E any](s S, n int) S { + if n < 0 { + panic("cannot be negative") + } + if n -= cap(s) - len(s); n > 0 { + // TODO(https://go.dev/issue/53888): Make using []E instead of S + // to workaround a compiler bug where the runtime.growslice optimization + // does not take effect. Revert when the compiler is fixed. + s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] + } + return s +} + +// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. +func Clip[S ~[]E, E any](s S) S { + return s[:len(s):len(s)] +} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go new file mode 100644 index 000000000..c22e74bd1 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/sort.go @@ -0,0 +1,127 @@ +// Copyright 2022 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import ( + "math/bits" + + "golang.org/x/exp/constraints" +) + +// Sort sorts a slice of any ordered type in ascending order. +// Sort may fail to sort correctly when sorting slices of floating-point +// numbers containing Not-a-number (NaN) values. +// Use slices.SortFunc(x, func(a, b float64) bool {return a < b || (math.IsNaN(a) && !math.IsNaN(b))}) +// instead if the input may contain NaNs. +func Sort[E constraints.Ordered](x []E) { + n := len(x) + pdqsortOrdered(x, 0, n, bits.Len(uint(n))) +} + +// SortFunc sorts the slice x in ascending order as determined by the less function. +// This sort is not guaranteed to be stable. +// +// SortFunc requires that less is a strict weak ordering. +// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. +func SortFunc[E any](x []E, less func(a, b E) bool) { + n := len(x) + pdqsortLessFunc(x, 0, n, bits.Len(uint(n)), less) +} + +// SortStable sorts the slice x while keeping the original order of equal +// elements, using less to compare elements. +func SortStableFunc[E any](x []E, less func(a, b E) bool) { + stableLessFunc(x, len(x), less) +} + +// IsSorted reports whether x is sorted in ascending order. +func IsSorted[E constraints.Ordered](x []E) bool { + for i := len(x) - 1; i > 0; i-- { + if x[i] < x[i-1] { + return false + } + } + return true +} + +// IsSortedFunc reports whether x is sorted in ascending order, with less as the +// comparison function. +func IsSortedFunc[E any](x []E, less func(a, b E) bool) bool { + for i := len(x) - 1; i > 0; i-- { + if less(x[i], x[i-1]) { + return false + } + } + return true +} + +// BinarySearch searches for target in a sorted slice and returns the position +// where target is found, or the position where target would appear in the +// sort order; it also returns a bool saying whether the target is really found +// in the slice. The slice must be sorted in increasing order. +func BinarySearch[E constraints.Ordered](x []E, target E) (int, bool) { + // search returns the leftmost position where f returns true, or len(x) if f + // returns false for all x. This is the insertion position for target in x, + // and could point to an element that's either == target or not. + pos := search(len(x), func(i int) bool { return x[i] >= target }) + if pos >= len(x) || x[pos] != target { + return pos, false + } else { + return pos, true + } +} + +// BinarySearchFunc works like BinarySearch, but uses a custom comparison +// function. The slice must be sorted in increasing order, where "increasing" is +// defined by cmp. cmp(a, b) is expected to return an integer comparing the two +// parameters: 0 if a == b, a negative number if a < b and a positive number if +// a > b. +func BinarySearchFunc[E any](x []E, target E, cmp func(E, E) int) (int, bool) { + pos := search(len(x), func(i int) bool { return cmp(x[i], target) >= 0 }) + if pos >= len(x) || cmp(x[pos], target) != 0 { + return pos, false + } else { + return pos, true + } +} + +func search(n int, f func(int) bool) int { + // Define f(-1) == false and f(n) == true. + // Invariant: f(i-1) == false, f(j) == true. + i, j := 0, n + for i < j { + h := int(uint(i+j) >> 1) // avoid overflow when computing h + // i ≤ h < j + if !f(h) { + i = h + 1 // preserves f(i-1) == false + } else { + j = h // preserves f(j) == true + } + } + // i == j, f(i-1) == false, and f(j) (= f(i)) == true => answer is i. 
+ return i +} + +type sortedHint int // hint for pdqsort when choosing the pivot + +const ( + unknownHint sortedHint = iota + increasingHint + decreasingHint +) + +// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf +type xorshift uint64 + +func (r *xorshift) Next() uint64 { + *r ^= *r << 13 + *r ^= *r >> 17 + *r ^= *r << 5 + return uint64(*r) +} + +func nextPowerOfTwo(length int) uint { + return 1 << bits.Len(uint(length)) +} diff --git a/vendor/golang.org/x/exp/slices/zsortfunc.go b/vendor/golang.org/x/exp/slices/zsortfunc.go new file mode 100644 index 000000000..2a632476c --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortfunc.go @@ -0,0 +1,479 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +// insertionSortLessFunc sorts data[a:b] using insertion sort. +func insertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + for i := a + 1; i < b; i++ { + for j := i; j > a && less(data[j], data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownLessFunc implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. +func siftDownLessFunc[E any](data []E, lo, hi, first int, less func(a, b E) bool) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && less(data[first+child], data[first+child+1]) { + child++ + } + if !less(data[first+root], data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownLessFunc(data, i, hi, first, less) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownLessFunc(data, lo, i, first, less) + } +} + +// pdqsortLessFunc sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortLessFunc[E any](data []E, a, b, limit int, less func(a, b E) bool) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortLessFunc(data, a, b, less) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortLessFunc(data, a, b, less) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsLessFunc(data, a, b, less) + limit-- + } + + pivot, hint := choosePivotLessFunc(data, a, b, less) + if hint == decreasingHint { + reverseRangeLessFunc(data, a, b, less) + // The chosen pivot was pivot-a elements after the start of the array. 
+ // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortLessFunc(data, a, b, less) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !less(data[a-1], data[pivot]) { + mid := partitionEqualLessFunc(data, a, b, pivot, less) + a = mid + continue + } + + mid, alreadyPartitioned := partitionLessFunc(data, a, b, pivot, less) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortLessFunc(data, a, mid, limit, less) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortLessFunc(data, mid+1, b, limit, less) + b = mid + } + } +} + +// partitionLessFunc does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. +// On return, data[newpivot] = p +func partitionLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int, alreadyPartitioned bool) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + data[j], data[a] = data[a], data[j] + return j, true + } + data[i], data[j] = data[j], data[i] + i++ + j-- + + for { + for i <= j && less(data[i], data[a]) { + i++ + } + for i <= j && !less(data[j], data[a]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + data[j], data[a] = data[a], data[j] + return j, false +} + +// partitionEqualLessFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. +// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. +func partitionEqualLessFunc[E any](data []E, a, b, pivot int, less func(a, b E) bool) (newpivot int) { + data[a], data[pivot] = data[pivot], data[a] + i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned + + for { + for i <= j && !less(data[a], data[i]) { + i++ + } + for i <= j && less(data[a], data[j]) { + j-- + } + if i > j { + break + } + data[i], data[j] = data[j], data[i] + i++ + j-- + } + return i +} + +// partialInsertionSortLessFunc partially sorts a slice, returns true if the slice is sorted at the end. +func partialInsertionSortLessFunc[E any](data []E, a, b int, less func(a, b E) bool) bool { + const ( + maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted + shortestShifting = 50 // don't shift any elements on short arrays + ) + i := a + 1 + for j := 0; j < maxSteps; j++ { + for i < b && !less(data[i], data[i-1]) { + i++ + } + + if i == b { + return true + } + + if b-a < shortestShifting { + return false + } + + data[i], data[i-1] = data[i-1], data[i] + + // Shift the smaller one to the left. + if i-a >= 2 { + for j := i - 1; j >= 1; j-- { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + // Shift the greater one to the right. + if b-i >= 2 { + for j := i + 1; j < b; j++ { + if !less(data[j], data[j-1]) { + break + } + data[j], data[j-1] = data[j-1], data[j] + } + } + } + return false +} + +// breakPatternsLessFunc scatters some elements around in an attempt to break some patterns +// that might cause imbalanced partitions in quicksort. +func breakPatternsLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + length := b - a + if length >= 8 { + random := xorshift(length) + modulus := nextPowerOfTwo(length) + + for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { + other := int(uint(random.Next()) & (modulus - 1)) + if other >= length { + other -= length + } + data[idx], data[a+other] = data[a+other], data[idx] + } + } +} + +// choosePivotLessFunc chooses a pivot in data[a:b]. +// +// [0,8): chooses a static pivot. +// [8,shortestNinther): uses the simple median-of-three method. +// [shortestNinther,∞): uses the Tukey ninther method. 
+func choosePivotLessFunc[E any](data []E, a, b int, less func(a, b E) bool) (pivot int, hint sortedHint) { + const ( + shortestNinther = 50 + maxSwaps = 4 * 3 + ) + + l := b - a + + var ( + swaps int + i = a + l/4*1 + j = a + l/4*2 + k = a + l/4*3 + ) + + if l >= 8 { + if l >= shortestNinther { + // Tukey ninther method, the idea came from Rust's implementation. + i = medianAdjacentLessFunc(data, i, &swaps, less) + j = medianAdjacentLessFunc(data, j, &swaps, less) + k = medianAdjacentLessFunc(data, k, &swaps, less) + } + // Find the median among i, j, k and stores it into j. + j = medianLessFunc(data, i, j, k, &swaps, less) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2LessFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2LessFunc[E any](data []E, a, b int, swaps *int, less func(a, b E) bool) (int, int) { + if less(data[b], data[a]) { + *swaps++ + return b, a + } + return a, b +} + +// medianLessFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianLessFunc[E any](data []E, a, b, c int, swaps *int, less func(a, b E) bool) int { + a, b = order2LessFunc(data, a, b, swaps, less) + b, c = order2LessFunc(data, b, c, swaps, less) + a, b = order2LessFunc(data, a, b, swaps, less) + return b +} + +// medianAdjacentLessFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentLessFunc[E any](data []E, a int, swaps *int, less func(a, b E) bool) int { + return medianLessFunc(data, a-1, a, a+1, swaps, less) +} + +func reverseRangeLessFunc[E any](data []E, a, b int, less func(a, b E) bool) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeLessFunc[E any](data []E, a, b, n int, less func(a, b E) bool) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableLessFunc[E any](data []E, n int, less func(a, b E) bool) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortLessFunc(data, a, b, less) + a = b + b += blockSize + } + insertionSortLessFunc(data, a, n, less) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeLessFunc(data, a, a+blockSize, b, less) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeLessFunc(data, a, m, n, less) + } + blockSize *= 2 + } +} + +// symMergeLessFunc merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. 
+// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. + i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if less(data[h], data[a]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !less(data[m], data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !less(data[p-c], data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateLessFunc(data, start, m, end, less) + } + if a < start && start < mid { + symMergeLessFunc(data, a, start, mid, less) + } + if mid < end && end < b { + symMergeLessFunc(data, mid, end, b, less) + } +} + +// rotateLessFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateLessFunc[E any](data []E, a, m, b int, less func(a, b E) bool) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeLessFunc(data, m-i, m, j, less) + i -= j + } else { + swapRangeLessFunc(data, m-i, m+j-i, i, less) + j -= i + } + } + // i == j + swapRangeLessFunc(data, m-i, m, i, less) +} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go new file mode 100644 index 000000000..efaa1c8b7 --- /dev/null +++ b/vendor/golang.org/x/exp/slices/zsortordered.go @@ -0,0 +1,481 @@ +// Code generated by gen_sort_variants.go; DO NOT EDIT. + +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slices + +import "golang.org/x/exp/constraints" + +// insertionSortOrdered sorts data[a:b] using insertion sort. +func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { + for i := a + 1; i < b; i++ { + for j := i; j > a && (data[j] < data[j-1]); j-- { + data[j], data[j-1] = data[j-1], data[j] + } + } +} + +// siftDownOrdered implements the heap property on data[lo:hi]. +// first is an offset into the array where the root of the heap lies. 
+func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { + root := lo + for { + child := 2*root + 1 + if child >= hi { + break + } + if child+1 < hi && (data[first+child] < data[first+child+1]) { + child++ + } + if !(data[first+root] < data[first+child]) { + return + } + data[first+root], data[first+child] = data[first+child], data[first+root] + root = child + } +} + +func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { + first := a + lo := 0 + hi := b - a + + // Build heap with greatest element at top. + for i := (hi - 1) / 2; i >= 0; i-- { + siftDownOrdered(data, i, hi, first) + } + + // Pop elements, largest first, into end of data. + for i := hi - 1; i >= 0; i-- { + data[first], data[first+i] = data[first+i], data[first] + siftDownOrdered(data, lo, i, first) + } +} + +// pdqsortOrdered sorts data[a:b]. +// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. +// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf +// C++ implementation: https://github.com/orlp/pdqsort +// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ +// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. +func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { + const maxInsertion = 12 + + var ( + wasBalanced = true // whether the last partitioning was reasonably balanced + wasPartitioned = true // whether the slice was already partitioned + ) + + for { + length := b - a + + if length <= maxInsertion { + insertionSortOrdered(data, a, b) + return + } + + // Fall back to heapsort if too many bad choices were made. + if limit == 0 { + heapSortOrdered(data, a, b) + return + } + + // If the last partitioning was imbalanced, we need to breaking patterns. + if !wasBalanced { + breakPatternsOrdered(data, a, b) + limit-- + } + + pivot, hint := choosePivotOrdered(data, a, b) + if hint == decreasingHint { + reverseRangeOrdered(data, a, b) + // The chosen pivot was pivot-a elements after the start of the array. + // After reversing it is pivot-a elements before the end of the array. + // The idea came from Rust's implementation. + pivot = (b - 1) - (pivot - a) + hint = increasingHint + } + + // The slice is likely already sorted. + if wasBalanced && wasPartitioned && hint == increasingHint { + if partialInsertionSortOrdered(data, a, b) { + return + } + } + + // Probably the slice contains many duplicate elements, partition the slice into + // elements equal to and elements greater than the pivot. + if a > 0 && !(data[a-1] < data[pivot]) { + mid := partitionEqualOrdered(data, a, b, pivot) + a = mid + continue + } + + mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) + wasPartitioned = alreadyPartitioned + + leftLen, rightLen := mid-a, b-mid + balanceThreshold := length / 8 + if leftLen < rightLen { + wasBalanced = leftLen >= balanceThreshold + pdqsortOrdered(data, a, mid, limit) + a = mid + 1 + } else { + wasBalanced = rightLen >= balanceThreshold + pdqsortOrdered(data, mid+1, b, limit) + b = mid + } + } +} + +// partitionOrdered does one quicksort partition. +// Let p = data[pivot] +// Moves elements in data[a:b] around, so that data[i]
+// <p and data[j]>=p for i<newpivot and j>newpivot.
+// On return, data[newpivot] = p
+func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for i <= j && (data[i] < data[a]) {
+		i++
+	}
+	for i <= j && !(data[j] < data[a]) {
+		j--
+	}
+	if i > j {
+		data[j], data[a] = data[a], data[j]
+		return j, true
+	}
+	data[i], data[j] = data[j], data[i]
+	i++
+	j--
+
+	for {
+		for i <= j && (data[i] < data[a]) {
+			i++
+		}
+		for i <= j && !(data[j] < data[a]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	data[j], data[a] = data[a], data[j]
+	return j, false
+}
+
+// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
+// It is assumed that data[a:b] does not contain elements smaller than data[pivot].
+func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
+	data[a], data[pivot] = data[pivot], data[a]
+	i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
+
+	for {
+		for i <= j && !(data[a] < data[i]) {
+			i++
+		}
+		for i <= j && (data[a] < data[j]) {
+			j--
+		}
+		if i > j {
+			break
+		}
+		data[i], data[j] = data[j], data[i]
+		i++
+		j--
+	}
+	return i
+}
+
+// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
+func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
+	const (
+		maxSteps         = 5  // maximum number of adjacent out-of-order pairs that will get shifted
+		shortestShifting = 50 // don't shift any elements on short arrays
+	)
+	i := a + 1
+	for j := 0; j < maxSteps; j++ {
+		for i < b && !(data[i] < data[i-1]) {
+			i++
+		}
+
+		if i == b {
+			return true
+		}
+
+		if b-a < shortestShifting {
+			return false
+		}
+
+		data[i], data[i-1] = data[i-1], data[i]
+
+		// Shift the smaller one to the left.
+		if i-a >= 2 {
+			for j := i - 1; j >= 1; j-- {
+				if !(data[j] < data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+		// Shift the greater one to the right.
+		if b-i >= 2 {
+			for j := i + 1; j < b; j++ {
+				if !(data[j] < data[j-1]) {
+					break
+				}
+				data[j], data[j-1] = data[j-1], data[j]
+			}
+		}
+	}
+	return false
+}
+
+// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
+// that might cause imbalanced partitions in quicksort.
+func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
+	length := b - a
+	if length >= 8 {
+		random := xorshift(length)
+		modulus := nextPowerOfTwo(length)
+
+		for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
+			other := int(uint(random.Next()) & (modulus - 1))
+			if other >= length {
+				other -= length
+			}
+			data[idx], data[a+other] = data[a+other], data[idx]
+		}
+	}
+}
+
+// choosePivotOrdered chooses a pivot in data[a:b].
+//
+// [0,8): chooses a static pivot.
+// [8,shortestNinther): uses the simple median-of-three method.
+// [shortestNinther,∞): uses the Tukey ninther method.
+func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
+	const (
+		shortestNinther = 50
+		maxSwaps        = 4 * 3
+	)
+
+	l := b - a
+
+	var (
+		swaps int
+		i     = a + l/4*1
+		j     = a + l/4*2
+		k     = a + l/4*3
+	)
+
+	if l >= 8 {
+		if l >= shortestNinther {
+			// Tukey ninther method, the idea came from Rust's implementation.
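+			// Each of i, j, k is replaced by the median of itself and its two
+			// neighbors, and the pivot chosen below is the median of those
+			// three medians ("median of medians of three").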
+ i = medianAdjacentOrdered(data, i, &swaps) + j = medianAdjacentOrdered(data, j, &swaps) + k = medianAdjacentOrdered(data, k, &swaps) + } + // Find the median among i, j, k and stores it into j. + j = medianOrdered(data, i, j, k, &swaps) + } + + switch swaps { + case 0: + return j, increasingHint + case maxSwaps: + return j, decreasingHint + default: + return j, unknownHint + } +} + +// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. +func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { + if data[b] < data[a] { + *swaps++ + return b, a + } + return a, b +} + +// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. +func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { + a, b = order2Ordered(data, a, b, swaps) + b, c = order2Ordered(data, b, c, swaps) + a, b = order2Ordered(data, a, b, swaps) + return b +} + +// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. +func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { + return medianOrdered(data, a-1, a, a+1, swaps) +} + +func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { + i := a + j := b - 1 + for i < j { + data[i], data[j] = data[j], data[i] + i++ + j-- + } +} + +func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { + for i := 0; i < n; i++ { + data[a+i], data[b+i] = data[b+i], data[a+i] + } +} + +func stableOrdered[E constraints.Ordered](data []E, n int) { + blockSize := 20 // must be > 0 + a, b := 0, blockSize + for b <= n { + insertionSortOrdered(data, a, b) + a = b + b += blockSize + } + insertionSortOrdered(data, a, n) + + for blockSize < n { + a, b = 0, 2*blockSize + for b <= n { + symMergeOrdered(data, a, a+blockSize, b) + a = b + b += 2 * blockSize + } + if m := a + blockSize; m < n { + symMergeOrdered(data, a, m, n) + } + blockSize *= 2 + } +} + +// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using +// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum +// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz +// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in +// Computer Science, pages 714-723. Springer, 2004. +// +// Let M = m-a and N = b-n. Wolog M < N. +// The recursion depth is bound by ceil(log(N+M)). +// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. +// The algorithm needs O((M+N)*log(M)) calls to data.Swap. +// +// The paper gives O((M+N)*log(M)) as the number of assignments assuming a +// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation +// in the paper carries through for Swap operations, especially as the block +// swapping rotate uses only O(M+N) Swaps. +// +// symMerge assumes non-degenerate arguments: a < m && m < b. +// Having the caller check this condition eliminates many leaf recursion calls, +// which improves performance. +func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[a] into data[m:b] + // if data[a:m] only contains one element. + if m-a == 1 { + // Use binary search to find the lowest index i + // such that data[i] >= data[a] for m <= i < b. + // Exit the search loop with i == b in case no such index exists. 
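+		// This is a standard lower-bound search: throughout the loop,
+		// data[k] < data[a] holds for k in [m, i) and data[k] >= data[a]
+		// holds for k in [j, b).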
+ i := m + j := b + for i < j { + h := int(uint(i+j) >> 1) + if data[h] < data[a] { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[a] reaches the position before i. + for k := a; k < i-1; k++ { + data[k], data[k+1] = data[k+1], data[k] + } + return + } + + // Avoid unnecessary recursions of symMerge + // by direct insertion of data[m] into data[a:m] + // if data[m:b] only contains one element. + if b-m == 1 { + // Use binary search to find the lowest index i + // such that data[i] > data[m] for a <= i < m. + // Exit the search loop with i == m in case no such index exists. + i := a + j := m + for i < j { + h := int(uint(i+j) >> 1) + if !(data[m] < data[h]) { + i = h + 1 + } else { + j = h + } + } + // Swap values until data[m] reaches the position i. + for k := m; k > i; k-- { + data[k], data[k-1] = data[k-1], data[k] + } + return + } + + mid := int(uint(a+b) >> 1) + n := mid + m + var start, r int + if m > mid { + start = n - b + r = mid + } else { + start = a + r = m + } + p := n - 1 + + for start < r { + c := int(uint(start+r) >> 1) + if !(data[p-c] < data[c]) { + start = c + 1 + } else { + r = c + } + } + + end := n - start + if start < m && m < end { + rotateOrdered(data, start, m, end) + } + if a < start && start < mid { + symMergeOrdered(data, a, start, mid) + } + if mid < end && end < b { + symMergeOrdered(data, mid, end, b) + } +} + +// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: +// Data of the form 'x u v y' is changed to 'x v u y'. +// rotate performs at most b-a many calls to data.Swap, +// and it assumes non-degenerate arguments: a < m && m < b. +func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { + i := m - a + j := b - m + + for i != j { + if i > j { + swapRangeOrdered(data, m-i, m, j) + i -= j + } else { + swapRangeOrdered(data, m-i, m+j-i, i) + j -= i + } + } + // i == j + swapRangeOrdered(data, m-i, m, i) +} diff --git a/vendor/golang.org/x/exp/slog/attr.go b/vendor/golang.org/x/exp/slog/attr.go new file mode 100644 index 000000000..29431cb7a --- /dev/null +++ b/vendor/golang.org/x/exp/slog/attr.go @@ -0,0 +1,84 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "fmt" + "time" +) + +// An Attr is a key-value pair. +type Attr struct { + Key string + Value Value +} + +// String returns an Attr for a string value. +func String(key, value string) Attr { + return Attr{key, StringValue(value)} +} + +// Int64 returns an Attr for an int64. +func Int64(key string, value int64) Attr { + return Attr{key, Int64Value(value)} +} + +// Int converts an int to an int64 and returns +// an Attr with that value. +func Int(key string, value int) Attr { + return Int64(key, int64(value)) +} + +// Uint64 returns an Attr for a uint64. +func Uint64(key string, v uint64) Attr { + return Attr{key, Uint64Value(v)} +} + +// Float64 returns an Attr for a floating-point number. +func Float64(key string, v float64) Attr { + return Attr{key, Float64Value(v)} +} + +// Bool returns an Attr for a bool. +func Bool(key string, v bool) Attr { + return Attr{key, BoolValue(v)} +} + +// Time returns an Attr for a time.Time. +// It discards the monotonic portion. +func Time(key string, v time.Time) Attr { + return Attr{key, TimeValue(v)} +} + +// Duration returns an Attr for a time.Duration. 
+func Duration(key string, v time.Duration) Attr { + return Attr{key, DurationValue(v)} +} + +// Group returns an Attr for a Group Value. +// The caller must not subsequently mutate the +// argument slice. +// +// Use Group to collect several Attrs under a single +// key on a log line, or as the result of LogValue +// in order to log a single value as multiple Attrs. +func Group(key string, as ...Attr) Attr { + return Attr{key, GroupValue(as...)} +} + +// Any returns an Attr for the supplied value. +// See [Value.AnyValue] for how values are treated. +func Any(key string, value any) Attr { + return Attr{key, AnyValue(value)} +} + +// Equal reports whether a and b have equal keys and values. +func (a Attr) Equal(b Attr) bool { + return a.Key == b.Key && a.Value.Equal(b.Value) +} + +func (a Attr) String() string { + return fmt.Sprintf("%s=%s", a.Key, a.Value) +} diff --git a/vendor/golang.org/x/exp/slog/context.go b/vendor/golang.org/x/exp/slog/context.go new file mode 100644 index 000000000..504adaf91 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/context.go @@ -0,0 +1,30 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import "context" + +type contextKey struct{} + +// NewContext returns a context that contains the given Logger. +// Use FromContext to retrieve the Logger. +func NewContext(ctx context.Context, l *Logger) context.Context { + return context.WithValue(ctx, contextKey{}, l) +} + +// FromContext returns the Logger stored in ctx by NewContext, or the default +// Logger if there is none. +func FromContext(ctx context.Context) *Logger { + if l, ok := ctx.Value(contextKey{}).(*Logger); ok { + return l + } + return Default() +} + +// Ctx retrieves a Logger from the given context using FromContext. Then it adds +// the given context to the Logger using WithContext and returns the result. +func Ctx(ctx context.Context) *Logger { + return FromContext(ctx).WithContext(ctx) +} diff --git a/vendor/golang.org/x/exp/slog/doc.go b/vendor/golang.org/x/exp/slog/doc.go new file mode 100644 index 000000000..013b5ed61 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/doc.go @@ -0,0 +1,107 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +/* +Package slog provides structured logging, +in which log records include a message, +a severity level, and various other attributes +expressed as key-value pairs. + +It defines a type, [Logger], +which provides several methods (such as [Logger.Info] and [Logger.Error]) +for reporting events of interest. + +Each Logger is associated with a [Handler]. +A Logger output method creates a [Record] from the method arguments +and passes it to the Handler, which decides how to handle it. +There is a default Logger accessible through top-level functions +(such as [Info] and [Error]) that call the corresponding Logger methods. + +A log record consists of a time, a level, a message, and a set of key-value +pairs, where the keys are strings and the values may be of any type. +As an example, + + slog.Info("hello", "count", 3) + +creates a record containing the time of the call, +a level of Info, the message "hello", and a single +pair with key "count" and value 3. + +The [Info] top-level function calls the [Logger.Info] method on the default Logger. 
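+For example,
+
+	slog.Info("hello", "count", 3)
+
+behaves like
+
+	slog.Default().Info("hello", "count", 3)
+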
+In addition to [Logger.Info], there are methods for Debug, Warn and Error levels. +Besides these convenience methods for common levels, +there is also a [Logger.Log] method which takes the level as an argument. +Each of these methods has a corresponding top-level function that uses the +default logger. + +The default handler formats the log record's message, time, level, and attributes +as a string and passes it to the [log] package." + + 2022/11/08 15:28:26 INFO hello count=3 + +For more control over the output format, create a logger with a different handler. +This statement uses [New] to create a new logger with a TextHandler +that writes structured records in text form to standard error: + + logger := slog.New(slog.NewTextHandler(os.Stderr)) + +[TextHandler] output is a sequence of key=value pairs, easily and unambiguously +parsed by machine. This statement: + + logger.Info("hello", "count", 3) + +produces this output: + + time=2022-11-08T15:28:26.000-05:00 level=INFO msg=hello count=3 + +The package also provides [JSONHandler], whose output is line-delimited JSON: + + logger := slog.New(slog.NewJSONHandler(os.Stdout)) + logger.Info("hello", "count", 3) + +produces this output: + + {"time":"2022-11-08T15:28:26.000000000-05:00","level":"INFO","msg":"hello","count":3} + +Setting a logger as the default with + + slog.SetDefault(logger) + +will cause the top-level functions like [Info] to use it. +[SetDefault] also updates the default logger used by the [log] package, +so that existing applications that use [log.Printf] and related functions +will send log records to the logger's handler without needing to be rewritten. + + +# Attrs and Values + +# Levels + +# Configuring the built-in handlers + +TODO: cover HandlerOptions, Leveler, LevelVar + +# Groups + +# Contexts + +# Advanced topics + +## Customizing a type's logging behavior + +TODO: discuss LogValuer + +## Wrapping output methods + +TODO: discuss LogDepth, LogAttrDepth + +## Interoperating with other logging packabes + +TODO: discuss NewRecord, Record.AddAttrs + +## Writing a handler + +*/ diff --git a/vendor/golang.org/x/exp/slog/handler.go b/vendor/golang.org/x/exp/slog/handler.go new file mode 100644 index 000000000..e5216d155 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/handler.go @@ -0,0 +1,492 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "fmt" + "io" + "strconv" + "sync" + "time" + + "golang.org/x/exp/slices" + "golang.org/x/exp/slog/internal/buffer" +) + +// A Handler handles log records produced by a Logger.. +// +// A typical handler may print log records to standard error, +// or write them to a file or database, or perhaps augment them +// with additional attributes and pass them on to another handler. +// +// Any of the Handler's methods may be called concurrently with itself +// or with other methods. It is the responsibility of the Handler to +// manage this concurrency. +type Handler interface { + // Enabled reports whether the handler handles records at the given level. + // The handler ignores records whose level is lower. + // Enabled is called early, before any arguments are processed, + // to save effort if the log event should be discarded. + Enabled(Level) bool + + // Handle handles the Record. + // It will only be called if Enabled returns true. 
+ // Handle methods that produce output should observe the following rules: + // - If r.Time is the zero time, ignore the time. + // - If an Attr's key is the empty string, ignore the Attr. + Handle(r Record) error + + // WithAttrs returns a new Handler whose attributes consist of + // both the receiver's attributes and the arguments. + // The Handler owns the slice: it may retain, modify or discard it. + WithAttrs(attrs []Attr) Handler + + // WithGroup returns a new Handler with the given group appended to + // the receiver's existing groups. + // The keys of all subsequent attributes, whether added by With or in a + // Record, should be qualified by the sequence of group names. + // + // How this qualification happens is up to the Handler, so long as + // this Handler's attribute keys differ from those of another Handler + // with a different sequence of group names. + // + // A Handler should treat WithGroup as starting a Group of Attrs that ends + // at the end of the log event. That is, + // + // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2)) + // + // should behave like + // + // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) + WithGroup(name string) Handler +} + +type defaultHandler struct { + ch *commonHandler + // log.Output, except for testing + output func(calldepth int, message string) error +} + +func newDefaultHandler(output func(int, string) error) *defaultHandler { + return &defaultHandler{ + ch: &commonHandler{json: false}, + output: output, + } +} + +func (*defaultHandler) Enabled(l Level) bool { + return l >= InfoLevel +} + +// Collect the level, attributes and message in a string and +// write it with the default log.Logger. +// Let the log.Logger handle time and file/line. +func (h *defaultHandler) Handle(r Record) error { + buf := buffer.New() + defer buf.Free() + buf.WriteString(r.Level.String()) + buf.WriteByte(' ') + buf.WriteString(r.Message) + state := handleState{h: h.ch, buf: buf, sep: " "} + state.appendNonBuiltIns(r) + // 4 = log.Output depth + handlerWriter.Write + defaultHandler.Handle + return h.output(4, buf.String()) +} + +func (h *defaultHandler) WithAttrs(as []Attr) Handler { + return &defaultHandler{h.ch.withAttrs(as), h.output} +} + +func (h *defaultHandler) WithGroup(name string) Handler { + return &defaultHandler{h.ch.withGroup(name), h.output} +} + +// HandlerOptions are options for a TextHandler or JSONHandler. +// A zero HandlerOptions consists entirely of default values. +type HandlerOptions struct { + // When AddSource is true, the handler adds a ("source", "file:line") + // attribute to the output indicating the source code position of the log + // statement. AddSource is false by default to skip the cost of computing + // this information. + AddSource bool + + // Level reports the minimum record level that will be logged. + // The handler discards records with lower levels. + // If Level is nil, the handler assumes InfoLevel. + // The handler calls Level.Level for each record processed; + // to adjust the minimum level dynamically, use a LevelVar. + Level Leveler + + // ReplaceAttr is called to rewrite each attribute before it is logged. + // If ReplaceAttr returns an Attr with Key == "", the attribute is discarded. + // + // The built-in attributes with keys "time", "level", "source", and "msg" + // are passed to this function first, except that time and level are omitted + // if zero, and source is omitted if AddSourceLine is false. 
+ // + // ReplaceAttr can be used to change the default keys of the built-in + // attributes, convert types (for example, to replace a `time.Time` with the + // integer seconds since the Unix epoch), sanitize personal information, or + // remove attributes from the output. + ReplaceAttr func(a Attr) Attr +} + +// Keys for "built-in" attributes. +const ( + // TimeKey is the key used by the built-in handlers for the time + // when the log method is called. The associated Value is a [time.Time]. + TimeKey = "time" + // LevelKey is the key used by the built-in handlers for the level + // of the log call. The associated value is a [Level]. + LevelKey = "level" + // MessageKey is the key used by the built-in handlers for the + // message of the log call. The associated value is a string. + MessageKey = "msg" + // SourceKey is the key used by the built-in handlers for the source file + // and line of the log call. The associated value is a string. + SourceKey = "source" +) + +type commonHandler struct { + json bool // true => output JSON; false => output text + opts HandlerOptions + preformattedAttrs []byte + groupPrefix string // for text: prefix of groups opened in preformatting + groups []string // all groups started from WithGroup + nOpenGroups int // the number of groups opened in preformattedAttrs + mu sync.Mutex + w io.Writer +} + +func (h *commonHandler) clone() *commonHandler { + // We can't use assignment because we can't copy the mutex. + return &commonHandler{ + json: h.json, + opts: h.opts, + preformattedAttrs: slices.Clip(h.preformattedAttrs), + groupPrefix: h.groupPrefix, + groups: slices.Clip(h.groups), + nOpenGroups: h.nOpenGroups, + w: h.w, + } +} + +// Enabled reports whether l is greater than or equal to the +// minimum level. +func (h *commonHandler) enabled(l Level) bool { + minLevel := InfoLevel + if h.opts.Level != nil { + minLevel = h.opts.Level.Level() + } + return l >= minLevel +} + +func (h *commonHandler) withAttrs(as []Attr) *commonHandler { + h2 := h.clone() + // Pre-format the attributes as an optimization. + prefix := buffer.New() + defer prefix.Free() + prefix.WriteString(h.groupPrefix) + state := handleState{ + h: h2, + buf: (*buffer.Buffer)(&h2.preformattedAttrs), + sep: "", + prefix: prefix, + } + if len(h2.preformattedAttrs) > 0 { + state.sep = h.attrSep() + } + state.openGroups() + for _, a := range as { + state.appendAttr(a) + } + // Remember the new prefix for later keys. + h2.groupPrefix = state.prefix.String() + // Remember how many opened groups are in preformattedAttrs, + // so we don't open them again when we handle a Record. + h2.nOpenGroups = len(h2.groups) + return h2 +} + +func (h *commonHandler) withGroup(name string) *commonHandler { + h2 := h.clone() + h2.groups = append(h2.groups, name) + return h2 +} + +func (h *commonHandler) handle(r Record) error { + rep := h.opts.ReplaceAttr + state := handleState{h: h, buf: buffer.New(), sep: ""} + defer state.buf.Free() + if h.json { + state.buf.WriteByte('{') + } + // Built-in attributes. They are not in a group. 
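+	// They are written in a fixed order: time (when non-zero), level,
+	// source (only when AddSource is set), and msg, followed by the
+	// remaining attributes via appendNonBuiltIns.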
+ // time + if !r.Time.IsZero() { + key := TimeKey + val := r.Time.Round(0) // strip monotonic to match Attr behavior + if rep == nil { + state.appendKey(key) + state.appendTime(val) + } else { + state.appendAttr(Time(key, val)) + } + } + // level + key := LevelKey + val := r.Level + if rep == nil { + state.appendKey(key) + state.appendString(val.String()) + } else { + state.appendAttr(Any(key, val)) + } + // source + if h.opts.AddSource { + file, line := r.SourceLine() + if file != "" { + key := SourceKey + if rep == nil { + state.appendKey(key) + state.appendSource(file, line) + } else { + buf := buffer.New() + buf.WriteString(file) // TODO: escape? + buf.WriteByte(':') + buf.WritePosInt(line) + s := buf.String() + buf.Free() + state.appendAttr(String(key, s)) + } + } + } + key = MessageKey + msg := r.Message + if rep == nil { + state.appendKey(key) + state.appendString(msg) + } else { + state.appendAttr(String(key, msg)) + } + state.appendNonBuiltIns(r) + state.buf.WriteByte('\n') + + h.mu.Lock() + defer h.mu.Unlock() + _, err := h.w.Write(*state.buf) + return err +} + +func (s *handleState) appendNonBuiltIns(r Record) { + // preformatted Attrs + if len(s.h.preformattedAttrs) > 0 { + s.buf.WriteString(s.sep) + s.buf.Write(s.h.preformattedAttrs) + s.sep = s.h.attrSep() + } + // Attrs in Record -- unlike the built-in ones, they are in groups started + // from WithGroup. + s.prefix = buffer.New() + defer s.prefix.Free() + s.prefix.WriteString(s.h.groupPrefix) + s.openGroups() + r.Attrs(func(a Attr) { + s.appendAttr(a) + }) + if s.h.json { + // Close all open groups. + for range s.h.groups { + s.buf.WriteByte('}') + } + // Close the top-level object. + s.buf.WriteByte('}') + } +} + +// attrSep returns the separator between attributes. +func (h *commonHandler) attrSep() string { + if h.json { + return "," + } + return " " +} + +// handleState holds state for a single call to commonHandler.handle. +// The initial value of sep determines whether to emit a separator +// before the next key, after which it stays true. +type handleState struct { + h *commonHandler + buf *buffer.Buffer + sep string // separator to write before next key + prefix *buffer.Buffer // for text: key prefix +} + +func (s *handleState) openGroups() { + for _, n := range s.h.groups[s.h.nOpenGroups:] { + s.openGroup(n) + } +} + +// Separator for group names and keys. +const keyComponentSep = '.' + +// openGroup starts a new group of attributes +// with the given name. +func (s *handleState) openGroup(name string) { + if s.h.json { + s.appendKey(name) + s.buf.WriteByte('{') + s.sep = "" + } else { + s.prefix.WriteString(name) + s.prefix.WriteByte(keyComponentSep) + } +} + +// closeGroup ends the group with the given name. +func (s *handleState) closeGroup(name string) { + if s.h.json { + s.buf.WriteByte('}') + } else { + (*s.prefix) = (*s.prefix)[:len(*s.prefix)-len(name)-1 /* forkeyComponentSep */] + } + s.sep = s.h.attrSep() +} + +// appendAttr appends the Attr's key and value using app. +// If sep is true, it also prepends a separator. +// It handles replacement and checking for an empty key. +// It sets sep to true if it actually did the append (if the key was non-empty +// after replacement). 
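+// Group values are flattened: the group is opened, each member Attr is
+// appended recursively, and the group is closed again.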
+func (s *handleState) appendAttr(a Attr) { + if rep := s.h.opts.ReplaceAttr; rep != nil { + a = rep(a) + } + if a.Key == "" { + return + } + v := a.Value.Resolve() + if v.Kind() == GroupKind { + s.openGroup(a.Key) + for _, aa := range v.Group() { + s.appendAttr(aa) + } + s.closeGroup(a.Key) + } else { + s.appendKey(a.Key) + s.appendValue(v) + } +} + +func (s *handleState) appendError(err error) { + s.appendString(fmt.Sprintf("!ERROR:%v", err)) +} + +func (s *handleState) appendKey(key string) { + s.buf.WriteString(s.sep) + if s.prefix != nil { + // TODO: optimize by avoiding allocation. + s.appendString(string(*s.prefix) + key) + } else { + s.appendString(key) + } + if s.h.json { + s.buf.WriteByte(':') + } else { + s.buf.WriteByte('=') + } + s.sep = s.h.attrSep() +} + +func (s *handleState) appendSource(file string, line int) { + if s.h.json { + s.buf.WriteByte('"') + *s.buf = appendEscapedJSONString(*s.buf, file) + s.buf.WriteByte(':') + s.buf.WritePosInt(line) + s.buf.WriteByte('"') + } else { + // text + if needsQuoting(file) { + s.appendString(file + ":" + strconv.Itoa(line)) + } else { + // common case: no quoting needed. + s.appendString(file) + s.buf.WriteByte(':') + s.buf.WritePosInt(line) + } + } +} + +func (s *handleState) appendString(str string) { + if s.h.json { + s.buf.WriteByte('"') + *s.buf = appendEscapedJSONString(*s.buf, str) + s.buf.WriteByte('"') + } else { + // text + if needsQuoting(str) { + *s.buf = strconv.AppendQuote(*s.buf, str) + } else { + s.buf.WriteString(str) + } + } +} + +func (s *handleState) appendValue(v Value) { + var err error + if s.h.json { + err = appendJSONValue(s, v) + } else { + err = appendTextValue(s, v) + } + if err != nil { + s.appendError(err) + } +} + +func (s *handleState) appendTime(t time.Time) { + if s.h.json { + appendJSONTime(s, t) + } else { + writeTimeRFC3339Millis(s.buf, t) + } +} + +// This takes half the time of Time.AppendFormat. +func writeTimeRFC3339Millis(buf *buffer.Buffer, t time.Time) { + year, month, day := t.Date() + buf.WritePosIntWidth(year, 4) + buf.WriteByte('-') + buf.WritePosIntWidth(int(month), 2) + buf.WriteByte('-') + buf.WritePosIntWidth(day, 2) + buf.WriteByte('T') + hour, min, sec := t.Clock() + buf.WritePosIntWidth(hour, 2) + buf.WriteByte(':') + buf.WritePosIntWidth(min, 2) + buf.WriteByte(':') + buf.WritePosIntWidth(sec, 2) + ns := t.Nanosecond() + buf.WriteByte('.') + buf.WritePosIntWidth(ns/1e6, 3) + _, offsetSeconds := t.Zone() + if offsetSeconds == 0 { + buf.WriteByte('Z') + } else { + offsetMinutes := offsetSeconds / 60 + if offsetMinutes < 0 { + buf.WriteByte('-') + offsetMinutes = -offsetMinutes + } else { + buf.WriteByte('+') + } + buf.WritePosIntWidth(offsetMinutes/60, 2) + buf.WriteByte(':') + buf.WritePosIntWidth(offsetMinutes%60, 2) + } +} diff --git a/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go new file mode 100644 index 000000000..02cd08e04 --- /dev/null +++ b/vendor/golang.org/x/exp/slog/internal/buffer/buffer.go @@ -0,0 +1,84 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package buffer provides a pool-allocated byte buffer. +package buffer + +import ( + "sync" +) + +// buffer adapted from go/src/fmt/print.go +type Buffer []byte + +// Having an initial size gives a dramatic speedup. 
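+// New buffers start with 1 KiB of capacity, and Free returns only buffers of
+// at most 16 KiB to the pool, so oversized buffers are left to the garbage
+// collector rather than being retained.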
+var bufPool = sync.Pool{ + New: func() any { + b := make([]byte, 0, 1024) + return (*Buffer)(&b) + }, +} + +func New() *Buffer { + return bufPool.Get().(*Buffer) +} + +func (b *Buffer) Free() { + // To reduce peak allocation, return only smaller buffers to the pool. + const maxBufferSize = 16 << 10 + if cap(*b) <= maxBufferSize { + *b = (*b)[:0] + bufPool.Put(b) + } +} + +func (b *Buffer) Reset() { + *b = (*b)[:0] +} + +func (b *Buffer) Write(p []byte) (int, error) { + *b = append(*b, p...) + return len(p), nil +} + +func (b *Buffer) WriteString(s string) { + *b = append(*b, s...) +} + +func (b *Buffer) WriteByte(c byte) { + *b = append(*b, c) +} + +func (b *Buffer) WritePosInt(i int) { + b.WritePosIntWidth(i, 0) +} + +// WritePosIntWidth writes non-negative integer i to the buffer, padded on the left +// by zeroes to the given width. Use a width of 0 to omit padding. +func (b *Buffer) WritePosIntWidth(i, width int) { + // Cheap integer to fixed-width decimal ASCII. + // Copied from log/log.go. + + if i < 0 { + panic("negative int") + } + + // Assemble decimal in reverse order. + var bb [20]byte + bp := len(bb) - 1 + for i >= 10 || width > 1 { + width-- + q := i / 10 + bb[bp] = byte('0' + i - q*10) + bp-- + i = q + } + // i < 10 + bb[bp] = byte('0' + i) + b.Write(bb[bp:]) +} + +func (b *Buffer) String() string { + return string(*b) +} diff --git a/vendor/golang.org/x/exp/slog/json_handler.go b/vendor/golang.org/x/exp/slog/json_handler.go new file mode 100644 index 000000000..0475034ae --- /dev/null +++ b/vendor/golang.org/x/exp/slog/json_handler.go @@ -0,0 +1,344 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package slog + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "strconv" + "time" + "unicode/utf8" + + "golang.org/x/exp/slog/internal/buffer" +) + +// JSONHandler is a Handler that writes Records to an io.Writer as +// line-delimited JSON objects. +type JSONHandler struct { + *commonHandler +} + +// NewJSONHandler creates a JSONHandler that writes to w, +// using the default options. +func NewJSONHandler(w io.Writer) *JSONHandler { + return (HandlerOptions{}).NewJSONHandler(w) +} + +// NewJSONHandler creates a JSONHandler with the given options that writes to w. +func (opts HandlerOptions) NewJSONHandler(w io.Writer) *JSONHandler { + return &JSONHandler{ + &commonHandler{ + json: true, + w: w, + opts: opts, + }, + } +} + +// Enabled reports whether the handler handles records at the given level. +// The handler ignores records whose level is lower. +func (h *JSONHandler) Enabled(level Level) bool { + return h.commonHandler.enabled(level) +} + +// With returns a new JSONHandler whose attributes consists +// of h's attributes followed by attrs. +func (h *JSONHandler) WithAttrs(attrs []Attr) Handler { + return &JSONHandler{commonHandler: h.commonHandler.withAttrs(attrs)} +} + +func (h *JSONHandler) WithGroup(name string) Handler { + return &JSONHandler{commonHandler: h.commonHandler.withGroup(name)} +} + +// Handle formats its argument Record as a JSON object on a single line. +// +// If the Record's time is zero, the time is omitted. +// Otherwise, the key is "time" +// and the value is output as with json.Marshal. +// +// If the Record's level is zero, the level is omitted. +// Otherwise, the key is "level" +// and the value of [Level.String] is output. 
+// +// If the AddSource option is set and source information is available, +// the key is "source" +// and the value is output as "FILE:LINE". +// +// The message's key is "msg". +// +// To modify these or other attributes, or remove them from the output, use +// [HandlerOptions.ReplaceAttr]. +// +// Values are formatted as with encoding/json.Marshal, with the following +// exceptions: +// - Floating-point NaNs and infinities are formatted as one of the strings +// "NaN", "+Inf" or "-Inf". +// - Levels are formatted as with Level.String. +// +// Each call to Handle results in a single serialized call to io.Writer.Write. +func (h *JSONHandler) Handle(r Record) error { + return h.commonHandler.handle(r) +} + +// Adapted from time.Time.MarshalJSON to avoid allocation. +func appendJSONTime(s *handleState, t time.Time) { + if y := t.Year(); y < 0 || y >= 10000 { + // RFC 3339 is clear that years are 4 digits exactly. + // See golang.org/issue/4556#c15 for more discussion. + s.appendError(errors.New("time.Time year outside of range [0,9999]")) + } + s.buf.WriteByte('"') + *s.buf = t.AppendFormat(*s.buf, time.RFC3339Nano) + s.buf.WriteByte('"') +} + +func appendJSONValue(s *handleState, v Value) error { + switch v.Kind() { + case StringKind: + s.appendString(v.str()) + case Int64Kind: + *s.buf = strconv.AppendInt(*s.buf, v.Int64(), 10) + case Uint64Kind: + *s.buf = strconv.AppendUint(*s.buf, v.Uint64(), 10) + case Float64Kind: + f := v.Float64() + // json.Marshal fails on special floats, so handle them here. + switch { + case math.IsInf(f, 1): + s.buf.WriteString(`"+Inf"`) + case math.IsInf(f, -1): + s.buf.WriteString(`"-Inf"`) + case math.IsNaN(f): + s.buf.WriteString(`"NaN"`) + default: + // json.Marshal is funny about floats; it doesn't + // always match strconv.AppendFloat. So just call it. + // That's expensive, but floats are rare. + if err := appendJSONMarshal(s.buf, f); err != nil { + return err + } + } + case BoolKind: + *s.buf = strconv.AppendBool(*s.buf, v.Bool()) + case DurationKind: + // Do what json.Marshal does. + *s.buf = strconv.AppendInt(*s.buf, int64(v.Duration()), 10) + case TimeKind: + s.appendTime(v.Time()) + case AnyKind: + a := v.Any() + if err, ok := a.(error); ok { + s.appendString(err.Error()) + } else { + return appendJSONMarshal(s.buf, a) + } + default: + panic(fmt.Sprintf("bad kind: %d", v.Kind())) + } + return nil +} + +func appendJSONMarshal(buf *buffer.Buffer, v any) error { + b, err := json.Marshal(v) + if err != nil { + return err + } + buf.Write(b) + return nil +} + +// appendEscapedJSONString escapes s for JSON and appends it to buf. +// It does not surround the string in quotation marks. +// +// Modified from encoding/json/encode.go:encodeState.string, +// with escapeHTML set to true. +// +// TODO: review whether HTML escaping is necessary. +func appendEscapedJSONString(buf []byte, s string) []byte { + char := func(b byte) { buf = append(buf, b) } + str := func(s string) { buf = append(buf, s...) } + + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if htmlSafeSet[b] { + i++ + continue + } + if start < i { + str(s[start:i]) + } + char('\\') + switch b { + case '\\', '"': + char(b) + case '\n': + char('n') + case '\r': + char('r') + case '\t': + char('t') + default: + // This encodes bytes < 0x20 except for \t, \n and \r. + // It also escapes <, >, and & + // because they can lead to security holes when + // user-controlled strings are rendered into JSON + // and served to some browsers. 
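+				// The leading backslash for this escape was already written by
+				// char('\\') before the switch, so only "u00" and two hex
+				// digits are appended here.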
+ str(`u00`) + char(hex[b>>4]) + char(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError && size == 1 { + if start < i { + str(s[start:i]) + } + str(`\ufffd`) + i += size + start = i + continue + } + // U+2028 is LINE SEPARATOR. + // U+2029 is PARAGRAPH SEPARATOR. + // They are both technically valid characters in JSON strings, + // but don't work in JSONP, which has to be evaluated as JavaScript, + // and can lead to security holes there. It is valid JSON to + // escape them, so we do so unconditionally. + // See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion. + if c == '\u2028' || c == '\u2029' { + if start < i { + str(s[start:i]) + } + str(`\u202`) + char(hex[c&0xF]) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + str(s[start:]) + } + return buf +} + +var hex = "0123456789abcdef" + +// Copied from encoding/json/encode.go:encodeState.string. +// +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML