From 115d1f9172402af5d19c22bb21c1d90686fa8b20 Mon Sep 17 00:00:00 2001
From: chressie
Date: Fri, 3 Mar 2023 15:30:14 +0100
Subject: [PATCH] glog: update glog to the latest internal version (#59)

This change upstreams a number of changes since the last package update.
In particular, this adds:

- New exported APIs (see godoc for further details):
  - glog.VDepth
  - glog.InfoDepthf
  - glog.WarningDepthf
  - glog.ErrorDepthf
  - glog.FatalDepthf
  - glog.ExitDepthf
  - glog.Names
  - glog.NewStandardLogger
  - glog.Verbose.InfoDepth
  - glog.Verbose.InfoDepthf
  - glog.ErrNoLog
- The -log_backtrace_at flag now accepts a comma-separated list of
  file:line locations.
- The -vmodule flag accepts a new syntax for matching the full path of a
  module instead of just the module name. See the updated package
  documentation.
- A new internal/logsink package to support outputs other than files.
  Note: this package is internal because we cannot yet guarantee that the
  API will remain stable.

More details at go/log-vs-glog (google-internal).
---
 glog.go                              | 1217 +++++++-------------------
 glog_bench_test.go                   |  164 ++++
 glog_file.go                         |  305 ++++++-
 glog_flags.go                        |  395 +++++++++
 glog_test.go                         |  482 +++++++---
 glog_vmodule_test.go                 |   10 +
 go.mod                               |    2 +-
 internal/logsink/logsink.go          |  387 ++++++++
 internal/logsink/logsink_fatal.go    |   35 +
 internal/stackdump/stackdump.go      |  127 +++
 internal/stackdump/stackdump_test.go |  152 ++++
 11 files changed, 2268 insertions(+), 1008 deletions(-)
 create mode 100644 glog_bench_test.go
 create mode 100644 glog_flags.go
 create mode 100644 glog_vmodule_test.go
 create mode 100644 internal/logsink/logsink.go
 create mode 100644 internal/logsink/logsink_fatal.go
 create mode 100644 internal/stackdump/stackdump.go
 create mode 100644 internal/stackdump/stackdump_test.go

diff --git a/glog.go b/glog.go
index 718c34f88..e108ae8b4 100644
--- a/glog.go
+++ b/glog.go
@@ -1,6 +1,6 @@
-// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
+// Go support for leveled logs, analogous to https://github.com/google/glog.
 //
-// Copyright 2013 Google Inc. All Rights Reserved.
+// Copyright 2023 Google Inc. All Rights Reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -50,15 +50,15 @@
 // Log files will be written to this directory instead of the
 // default temporary directory.
 //
-// Other flags provide aids to debugging.
+// Other flags provide aids to debugging.
 //
 // -log_backtrace_at=""
-// When set to a file and line number holding a logging statement,
-// such as
+// A comma-separated list of file and line numbers holding a logging
+// statement, such as
 // -log_backtrace_at=gopherflakes.go:234
-// a stack trace will be written to the Info log whenever execution
-// hits that statement. (Unlike with -vmodule, the ".go" must be
-// present.)
+// A stack trace will be written to the Info log whenever execution
+// hits one of these statements. (Unlike with -vmodule, the ".go"
+// must be present.)
 // -v=0
 // Enable V-leveled logging at the specified level.
 // -vmodule=""
@@ -66,100 +66,47 @@
 // where pattern is a literal file name (minus the ".go" suffix) or
 // "glob" pattern and N is a V level. For instance,
 // -vmodule=gopher*=3
-// sets the V level to 3 in all Go files whose names begin "gopher".
-//
+// sets the V level to 3 in all Go files whose names begin with "gopher",
+// and
+// -vmodule=/path/to/glog/glog_test=1
+// sets the V level to 1 in the Go file /path/to/glog/glog_test.go.
+// If a glob pattern contains a slash, it is matched against the full path, +// and the file name. Otherwise, the pattern is +// matched only against the file's basename. When both -vmodule and -v +// are specified, the -vmodule values take precedence for the specified +// modules. package glog +// This file contains the parts of the log package that are shared among all +// implementations (file, envelope, and appengine). + import ( - "bufio" "bytes" "errors" - "flag" "fmt" - "io" stdLog "log" "os" - "path/filepath" + "reflect" "runtime" + "runtime/pprof" "strconv" - "strings" "sync" "sync/atomic" + "syscall" "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 + "github.com/golang/glog/internal/logsink" + "github.com/golang/glog/internal/stackdump" ) -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} +var timeNow = time.Now // Stubbed out for testing. -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} +// ErrNoLog is the error we return if no log file has yet been created +// for the specified log type. +var ErrNoLog = errors.New("log file not yet created") // OutputStats tracks the number of output lines and bytes written. type OutputStats struct { @@ -183,724 +130,99 @@ var Stats struct { Info, Warning, Error OutputStats } -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, +var severityStats = [...]*OutputStats{ + logsink.Info: &Stats.Info, + logsink.Warning: &Stats.Warning, + logsink.Error: &Stats.Error, + logsink.Fatal: nil, } -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. -// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. 
-// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. +// Level specifies a level of verbosity for V logs. The -v flag is of type +// Level and should be modified only through the flag.Value interface. type Level int32 -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} +var metaPool sync.Pool // Pool of *logsink.Meta. -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.Atoi(value) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) +// metaPoolGet returns a *logsink.Meta from metaPool as both an interface and a +// pointer, allocating a new one if necessary. (Returning the interface value +// directly avoids an allocation if there was an existing pointer in the pool.) +func metaPoolGet() (any, *logsink.Meta) { + if metai := metaPool.Get(); metai != nil { + return metai, metai.(*logsink.Meta) } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil + meta := new(logsink.Meta) + return meta, meta } -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. 
- continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.Atoi(patLev[1]) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? - filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} +type stack bool -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. 
-type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -func init() { - flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") - flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") - flag.Var(&logging.verbosity, "v", "log level for V logs") - flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") - - // Default stderrThreshold is ERROR. - logging.stderrThreshold = errorLog - - logging.setVState(0, nil, false) - go logging.flushDaemon() -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. 
- // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. +const ( + noStack = stack(false) + withStack = stack(true) +) -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) +func appendBacktrace(depth int, format string, args []any) (string, []any) { + // Capture a backtrace as a stackdump.Stack (both text and PC slice). + // Structured log sinks can extract the backtrace in whichever format they + // prefer (PCs or text), and Text sinks will include it as just another part + // of the log message. + // + // Use depth instead of depth+1 so that the backtrace always includes the + // log function itself - otherwise the reason for the trace appearing in the + // log may not be obvious to the reader. + dump := stackdump.Caller(depth) + + // Add an arg and an entry in the format string for the stack dump. + // + // Copy the "args" slice to avoid a rare but serious aliasing bug + // (corrupting the caller's slice if they passed it to a non-Fatal call + // using "..."). + format = format + "\n\n%v\n" + args = append(append([]any(nil), args...), dump) + + return format, args +} + +// logf writes a log message for a log function call (or log function wrapper) +// at the given depth in the current goroutine's stack. +func logf(depth int, severity logsink.Severity, verbose bool, stack stack, format string, args ...any) { + now := timeNow() + _, file, line, ok := runtime.Caller(depth + 1) if !ok { file = "???" line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } } - return l.formatHeader(s, file, line), file, line -} -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits + if stack == withStack || backtraceAt(file, line) { + format, args = appendBacktrace(depth+1, format, args) } - if s > fatalLog { - s = infoLog // for safety. 
- } - buf := l.getBuffer() - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. - _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: now, + File: file, + Line: line, + Depth: depth + 1, + Severity: severity, + Verbose: verbose, + Thread: int64(pid), } - return copy(buf.tmp[i:], buf.tmp[j:]) + sinkf(meta, format, args...) + metaPool.Put(metai) } -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// output writes the data to the log files and releases the buffer. 
-func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if !flag.Parsed() { - os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) - os.Stderr.Write(data) - } else if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. - } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { +func sinkf(meta *logsink.Meta, format string, args ...any) { + meta.Depth++ + n, err := logsink.Printf(meta, format, args...) + if stats := severityStats[meta.Severity]; stats != nil { atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. 
-var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= MaxSize { - if err := sb.rotateFile(time.Now()); err != nil { - sb.logger.exit(err) - } + atomic.AddInt64(&stats.bytes, int64(n)) } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} -// rotateFile closes the syncBuffer's file and starts a new one. -func (sb *syncBuffer) rotateFile(now time.Time) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now) - sb.nbytes = 0 if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - } - if err := sb.rotateFile(now); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 30 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. 
- for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } + logsink.Printf(meta, "glog: exiting because of error: %s", err) + sinks.file.Flush() + os.Exit(2) } } @@ -912,9 +234,9 @@ func (l *loggingT) flushAll() { // Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not // recognized, CopyStandardLogTo panics. func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): %v", name, err)) } // Set a log format that captures the user's file and line: // d.go:23: message @@ -922,9 +244,22 @@ func CopyStandardLogTo(name string) { stdLog.SetOutput(logBridge(sev)) } +// NewStandardLogger returns a Logger that writes to the Google logs for the +// named and lower severities. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, NewStandardLogger panics. +func NewStandardLogger(name string) *stdLog.Logger { + sev, err := logsink.ParseSeverity(name) + if err != nil { + panic(fmt.Sprintf("log.NewStandardLogger(%q): %v", name, err)) + } + return stdLog.New(logBridge(sev), "", stdLog.Lshortfile) +} + // logBridge provides the Write method that enables CopyStandardLogTo to connect // Go's standard logs to the logs provided by this package. -type logBridge severity +type logBridge logsink.Severity // Write parses the standard logging line and passes its components to the // logger for severity(lb). @@ -946,36 +281,72 @@ func (lb logBridge) Write(b []byte) (n int, err error) { line = 1 } } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) + + // The depth below hard-codes details of how stdlog gets here. The alternative would be to walk + // up the stack looking for src/log/log.go but that seems like it would be + // unfortunately slow. + const stdLogDepth = 4 + + metai, meta := metaPoolGet() + *meta = logsink.Meta{ + Time: timeNow(), + File: file, + Line: line, + Depth: stdLogDepth, + Severity: logsink.Severity(lb), + Thread: int64(pid), + } + + format := "%s" + args := []any{text} + if backtraceAt(file, line) { + format, args = appendBacktrace(meta.Depth, format, args) + } + + sinkf(meta, format, args...) + metaPool.Put(metai) + return len(b), nil } -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] +// defaultFormat returns a fmt.Printf format specifier that formats its +// arguments as if they were passed to fmt.Print. +func defaultFormat(args []any) string { + n := len(args) + switch n { + case 0: + return "" + case 1: + return "%v" + } + + b := make([]byte, 0, n*3-1) + wasString := true // Suppress leading space. 
+ for _, arg := range args { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + if wasString || isString { + b = append(b, "%v"...) + } else { + b = append(b, " %v"...) + } + wasString = isString } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] + return string(b) +} + +// lnFormat returns a fmt.Printf format specifier that formats its arguments +// as if they were passed to fmt.Println. +func lnFormat(args []any) string { + if len(args) == 0 { + return "\n" } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } + + b := make([]byte, 0, len(args)*3) + for range args { + b = append(b, "%v "...) } - l.vmap[pc] = 0 - return 0 + b[len(b)-1] = '\n' // Replace the last space with a newline. + return string(b) } // Verbose is a boolean type that implements Infof (like Printf) etc. @@ -986,9 +357,13 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either +// // if glog.V(2) { glog.Info("log this") } +// // or +// // glog.V(2).Info("log this") +// // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. // @@ -997,184 +372,250 @@ type Verbose bool // V is at most the value of -v, or of -vmodule for the source file containing the // call, the V call will log. func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. + return VDepth(1, level) +} - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } +// VDepth acts as V but uses depth to determine which call frame to check vmodule for. +// VDepth(0, level) is the same as V(level). +func VDepth(depth int, level Level) Verbose { + return Verbose(verboseEnabled(depth+1, level)) +} - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...any) { + v.InfoDepth(1, args...) +} + +// InfoDepth is equivalent to the global InfoDepth function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) InfoDepth(depth int, args ...any) { + if v { + logf(depth+1, logsink.Info, true, noStack, defaultFormat(args), args...) } - return Verbose(false) } -// Info is equivalent to the global Info function, guarded by the value of v. +// InfoDepthf is equivalent to the global InfoDepthf function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { +func (v Verbose) InfoDepthf(depth int, format string, args ...any) { if v { - logging.print(infoLog, args...) 
+ logf(depth+1, logsink.Info, true, noStack, format, args...) } } // Infoln is equivalent to the global Infoln function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { +func (v Verbose) Infoln(args ...any) { if v { - logging.println(infoLog, args...) + logf(1, logsink.Info, true, noStack, lnFormat(args), args...) } } // Infof is equivalent to the global Infof function, guarded by the value of v. // See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { +func (v Verbose) Infof(format string, args ...any) { if v { - logging.printf(infoLog, format, args...) + logf(1, logsink.Info, true, noStack, format, args...) } } // Info logs to the INFO log. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) +func Info(args ...any) { + InfoDepth(1, args...) +} + +// InfoDepth calls Info from a different depth in the call stack. +// This enables a callee to emit logs that use the callsite information of its caller +// or any other callers in the stack. When depth == 0, the original callee's line +// information is emitted. When depth > 0, depth frames are skipped in the call stack +// and the final frame is treated like the original callee to Info. +func InfoDepth(depth int, args ...any) { + logf(depth+1, logsink.Info, false, noStack, defaultFormat(args), args...) } -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) +// InfoDepthf acts as InfoDepth but with format string. +func InfoDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Info, false, noStack, format, args...) } // Infoln logs to the INFO log. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) +func Infoln(args ...any) { + logf(1, logsink.Info, false, noStack, lnFormat(args), args...) } // Infof logs to the INFO log. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) +func Infof(format string, args ...any) { + logf(1, logsink.Info, false, noStack, format, args...) } // Warning logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) +func Warning(args ...any) { + WarningDepth(1, args...) } // WarningDepth acts as Warning but uses depth to determine which call frame to log. // WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) +func WarningDepth(depth int, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, defaultFormat(args), args...) +} + +// WarningDepthf acts as Warningf but uses depth to determine which call frame to log. +// WarningDepthf(0, "msg") is the same as Warningf("msg"). +func WarningDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Warning, false, noStack, format, args...) } // Warningln logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. 
-func Warningln(args ...interface{}) { - logging.println(warningLog, args...) +func Warningln(args ...any) { + logf(1, logsink.Warning, false, noStack, lnFormat(args), args...) } // Warningf logs to the WARNING and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) +func Warningf(format string, args ...any) { + logf(1, logsink.Warning, false, noStack, format, args...) } // Error logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) +func Error(args ...any) { + ErrorDepth(1, args...) } // ErrorDepth acts as Error but uses depth to determine which call frame to log. // ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) +func ErrorDepth(depth int, args ...any) { + logf(depth+1, logsink.Error, false, noStack, defaultFormat(args), args...) +} + +// ErrorDepthf acts as Errorf but uses depth to determine which call frame to log. +// ErrorDepthf(0, "msg") is the same as Errorf("msg"). +func ErrorDepthf(depth int, format string, args ...any) { + logf(depth+1, logsink.Error, false, noStack, format, args...) } // Errorln logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) +func Errorln(args ...any) { + logf(1, logsink.Error, false, noStack, lnFormat(args), args...) } // Errorf logs to the ERROR, WARNING, and INFO logs. // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) +func Errorf(format string, args ...any) { + logf(1, logsink.Error, false, noStack, format, args...) +} + +func fatalf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, withStack, format, args...) + sinks.file.Flush() + + err := abortProcess() // Should not return. + + // Failed to abort the process using signals. Dump a stack trace and exit. + Errorf("abortProcess returned unexpectedly: %v", err) + sinks.file.Flush() + pprof.Lookup("goroutine").WriteTo(os.Stderr, 1) + os.Exit(2) // Exit with the same code as the default SIGABRT handler. +} + +// abortProcess attempts to kill the current process in a way that will dump the +// currently-running goroutines someplace useful (Coroner or stderr). +// +// It does this by sending SIGABRT to the current process. Unfortunately, the +// signal may or may not be delivered to the current thread; in order to do that +// portably, we would need to add a cgo dependency and call pthread_kill. +// +// If successful, abortProcess does not return. +func abortProcess() error { + p, err := os.FindProcess(os.Getpid()) + if err != nil { + return err + } + if err := p.Signal(syscall.SIGABRT); err != nil { + return err + } + + // Sent the signal. Now we wait for it to arrive and any SIGABRT handlers to + // run (and eventually terminate the process themselves). + // + // We could just "select{}" here, but there's an outside chance that would + // trigger the runtime's deadlock detector if there happen not to be any + // background goroutines running. So we'll sleep a while first to give + // the signal some time. 
+ time.Sleep(10 * time.Second) + select {} } // Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) +func Fatal(args ...any) { + FatalDepth(1, args...) } // FatalDepth acts as Fatal but uses depth to determine which call frame to log. // FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) +func FatalDepth(depth int, args ...any) { + fatalf(depth+1, defaultFormat(args), args...) +} + +// FatalDepthf acts as Fatalf but uses depth to determine which call frame to log. +// FatalDepthf(0, "msg") is the same as Fatalf("msg"). +func FatalDepthf(depth int, format string, args ...any) { + fatalf(depth+1, format, args...) } // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Println; a newline is appended if missing. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) +func Fatalln(args ...any) { + fatalf(1, lnFormat(args), args...) } // Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). +// including a stack trace of all running goroutines, then calls os.Exit(2). // Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) +func Fatalf(format string, args ...any) { + fatalf(1, format, args...) } -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 +func exitf(depth int, format string, args ...any) { + logf(depth+1, logsink.Fatal, false, noStack, format, args...) + sinks.file.Flush() + os.Exit(1) +} // Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). // Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) +func Exit(args ...any) { + ExitDepth(1, args...) } // ExitDepth acts as Exit but uses depth to determine which call frame to log. // ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) +func ExitDepth(depth int, args ...any) { + exitf(depth+1, defaultFormat(args), args...) +} + +// ExitDepthf acts as Exitf but uses depth to determine which call frame to log. +// ExitDepthf(0, "msg") is the same as Exitf("msg"). +func ExitDepthf(depth int, format string, args ...any) { + exitf(depth+1, format, args...) } // Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) +func Exitln(args ...any) { + exitf(1, lnFormat(args), args...) } // Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). 
// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) +func Exitf(format string, args ...any) { + exitf(1, format, args...) } diff --git a/glog_bench_test.go b/glog_bench_test.go new file mode 100644 index 000000000..359546f8c --- /dev/null +++ b/glog_bench_test.go @@ -0,0 +1,164 @@ +package glog + +import ( + "flag" + "io/ioutil" + "runtime" + "sync" + "sync/atomic" + "testing" + "time" +) + +// discarder is a flushSyncWriter that discards all data. +// Sync sleeps for 10ms to simulate a disk seek. +type discarder struct { +} + +func (d *discarder) Write(data []byte) (int, error) { + return len(data), nil +} + +func (d *discarder) Flush() error { + return nil +} + +func (d *discarder) Sync() error { + time.Sleep(10 * time.Millisecond) + return nil +} + +func (d *discarder) filenames() []string { + return nil +} + +// newDiscard sets the log writers to all new byte buffers and returns the old array. +func (s *fileSink) newDiscarders() severityWriters { + return s.swap(severityWriters{new(discarder), new(discarder), new(discarder), new(discarder)}) +} + +func discardStderr() func() { + se := sinks.stderr.w + sinks.stderr.w = ioutil.Discard + return func() { sinks.stderr.w = se } +} + +const message = "benchmark log message" + +func benchmarkLog(b *testing.B, log func(...any)) { + defer sinks.file.swap(sinks.file.newDiscarders()) + defer discardStderr()() + b.ResetTimer() + for i := 0; i < b.N; i++ { + log(message) + } + b.StopTimer() +} + +func benchmarkLogConcurrent(b *testing.B, log func(...any)) { + defer sinks.file.swap(sinks.file.newDiscarders()) + defer discardStderr()() + b.ResetTimer() + concurrency := runtime.GOMAXPROCS(0) + var wg sync.WaitGroup + wg.Add(concurrency) + for i := 0; i < concurrency; i++ { + go func() { + for i := 0; i < b.N; i++ { + log(message) + } + wg.Done() + }() + } + wg.Wait() + b.StopTimer() +} + +func BenchmarkInfo(b *testing.B) { + benchmarkLog(b, Info) +} + +func BenchmarkInfoConcurrent(b *testing.B) { + benchmarkLogConcurrent(b, Info) +} + +func BenchmarkWarning(b *testing.B) { + benchmarkLog(b, Warning) +} + +func BenchmarkWarningConcurrent(b *testing.B) { + benchmarkLogConcurrent(b, Warning) +} + +func BenchmarkError(b *testing.B) { + benchmarkLog(b, Error) +} + +func BenchmarkErrorConcurrent(b *testing.B) { + benchmarkLogConcurrent(b, Error) +} + +func mixer() func(...any) { + var i int64 + return func(args ...any) { + n := atomic.AddInt64(&i, 1) + switch { + case n%10000 == 0: + Error(args...) + case n%1000 == 0: + Warning(args...) + default: + Info(args...) 
+ } + } +} + +func BenchmarkMix(b *testing.B) { + benchmarkLog(b, mixer()) +} + +func BenchmarkMixConcurrent(b *testing.B) { + benchmarkLogConcurrent(b, mixer()) +} + +func BenchmarkVLogDisabled(b *testing.B) { + benchmarkLog(b, vlog) +} + +func BenchmarkVLogDisabledConcurrent(b *testing.B) { + benchmarkLogConcurrent(b, vlog) +} + +func BenchmarkVLogModuleFlagSet(b *testing.B) { + defer withVmodule("nonexistant=5")() + benchmarkLog(b, vlog) +} + +func BenchmarkVLogModuleFlagSetConcurrent(b *testing.B) { + defer withVmodule("nonexistant=5")() + benchmarkLogConcurrent(b, vlog) +} + +func BenchmarkVLogEnabled(b *testing.B) { + defer withVmodule("glog_bench_test=5")() + if got := bool(V(3)); got != true { + b.Fatalf("V(3) == %v, want %v", got, true) + } + benchmarkLog(b, vlog) +} + +func BenchmarkVLogEnabledConcurrent(b *testing.B) { + defer withVmodule("glog_bench_test=5")() + benchmarkLogConcurrent(b, vlog) +} + +func vlog(args ...any) { + V(3).Info(args) +} + +func withVmodule(val string) func() { + if err := flag.Set("vmodule", val); err != nil { + panic(err) + } + return func() { flag.Set("vmodule", "") } +} diff --git a/glog_file.go b/glog_file.go index 65075d281..af1c934b8 100644 --- a/glog_file.go +++ b/glog_file.go @@ -1,6 +1,6 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// Go support for leveled logs, analogous to https://github.com/google/glog. // -// Copyright 2013 Google Inc. All Rights Reserved. +// Copyright 2023 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -19,26 +19,34 @@ package glog import ( + "bufio" + "bytes" "errors" "flag" "fmt" + "io" "os" "os/user" "path/filepath" + "runtime" "strings" "sync" "time" -) -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 + "github.com/golang/glog/internal/logsink" +) // logDirs lists the candidate directories for new log files. var logDirs []string -// If non-empty, overrides the choice of directory in which to write logs. -// See createLogDirs for the full list of possible destinations. -var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") +var ( + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + logLink = flag.String("log_link", "", "If non-empty, add symbolic links in this directory to the log files") + logBufLevel = flag.Int("logbuflevel", int(logsink.Info), "Buffer log messages logged at this level or lower"+ + " (-1 means don't buffer; 0 means buffer INFO only; ...). Has limited applicability on non-prod platforms.") +) func createLogDirs() { if *logDir != "" { @@ -64,9 +72,17 @@ func init() { if err == nil { userName = current.Username } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) + // Sanitize userName since it is used to construct file paths. + userName = strings.Map(func(r rune) rune { + switch { + case r >= 'a' && r <= 'z': + case r >= 'A' && r <= 'Z': + case r >= '0' && r <= '9': + default: + return '_' + } + return r + }, userName) } // shortHostname returns its argument, truncating at the first period. 
@@ -122,3 +138,270 @@ func create(tag string, t time.Time) (f *os.File, filename string, err error) { } return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) } + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer + filenames() []string +} + +var sinks struct { + stderr stderrSink + file fileSink +} + +func init() { + sinks.stderr.w = os.Stderr + + // Register stderr first: that way if we crash during file-writing at least + // the log will have gone somewhere. + logsink.TextSinks = append(logsink.TextSinks, &sinks.stderr, &sinks.file) + + sinks.file.flushChan = make(chan logsink.Severity, 1) + go sinks.file.flushDaemon() +} + +// stderrSink is a logsink.Text that writes log entries to stderr +// if they meet certain conditions. +type stderrSink struct { + mu sync.Mutex + w io.Writer +} + +// Enabled implements logsink.Text.Enabled. It returns true if any of the +// various stderr flags are enabled for logs of the given severity, if the log +// message is from the standard "log" package, or if google.Init has not yet run +// (and hence file logging is not yet initialized). +func (s *stderrSink) Enabled(m *logsink.Meta) bool { + return toStderr || alsoToStderr || m.Severity >= stderrThreshold.get() +} + +// Emit implements logsink.Text.Emit. +func (s *stderrSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + dn, err := s.w.Write(data) + n += dn + return n, err +} + +// severityWriters is an array of flushSyncWriter with a value for each +// logsink.Severity. +type severityWriters [4]flushSyncWriter + +// fileSink is a logsink.Text that prints to a set of Google log files. +type fileSink struct { + mu sync.Mutex + // file holds writer for each of the log types. + file severityWriters + flushChan chan logsink.Severity +} + +// Enabled implements logsink.Text.Enabled. It returns true if google.Init +// has run and both --disable_log_to_disk and --logtostderr are false. +func (s *fileSink) Enabled(m *logsink.Meta) bool { + return !toStderr +} + +// Emit implements logsink.Text.Emit +func (s *fileSink) Emit(m *logsink.Meta, data []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + if err = s.createMissingFiles(m.Severity); err != nil { + return 0, err + } + for sev := m.Severity; sev >= logsink.Info; sev-- { + if _, fErr := s.file[sev].Write(data); fErr != nil && err == nil { + err = fErr // Take the first error. + } + } + n = len(data) + if int(m.Severity) > *logBufLevel { + select { + case s.flushChan <- m.Severity: + default: + } + } + + return n, err +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// s.mu is held for all its methods. 
+type syncBuffer struct { + sink *fileSink + *bufio.Writer + file *os.File + names []string + sev logsink.Severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + return 0, err + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + return n, err +} + +func (sb *syncBuffer) filenames() []string { + return sb.names +} + +const footer = "\nCONTINUED IN NEXT FILE\n" + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + var err error + pn := "" + file, name, err := create(sb.sev.String(), now) + + if sb.file != nil { + // The current log file becomes the previous log at the end of + // this block, so save its name for use in the header of the next + // file. + pn = sb.file.Name() + sb.Flush() + // If there's an existing file, write a footer with the name of + // the next file in the chain, followed by the constant string + // \nCONTINUED IN NEXT FILE\n to make continuation detection simple. + sb.file.Write([]byte("Next log: ")) + sb.file.Write([]byte(name)) + sb.file.Write([]byte(footer)) + sb.file.Close() + } + + sb.file = file + sb.names = append(sb.names, name) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Previous log: %s\n", pn) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createMissingFiles creates all the log files for severity from infoLog up to +// upTo that have not already been created. +// s.mu is held. +func (s *fileSink) createMissingFiles(upTo logsink.Severity) error { + if s.file[upTo] != nil { + return nil + } + now := time.Now() + // Files are created in increasing severity order, so we can be assured that + // if a high severity logfile exists, then so do all of lower severity. + for sev := logsink.Info; sev <= upTo; sev++ { + if s.file[sev] != nil { + continue + } + sb := &syncBuffer{ + sink: s, + sev: sev, + } + if err := sb.rotateFile(now); err != nil { + return err + } + s.file[sev] = sb + } + return nil +} + +// flushDaemon periodically flushes the log file buffers. +func (s *fileSink) flushDaemon() { + tick := time.NewTicker(30 * time.Second) + defer tick.Stop() + for { + select { + case <-tick.C: + s.Flush() + case sev := <-s.flushChan: + s.flush(sev) + } + } +} + +// Flush flushes all pending log I/O. +func Flush() { + sinks.file.Flush() +} + +// Flush flushes all the logs and attempts to "sync" their data to disk. +func (s *fileSink) Flush() error { + return s.flush(logsink.Info) +} + +// flush flushes all logs of severity threshold or greater. 
+func (s *fileSink) flush(threshold logsink.Severity) error { + s.mu.Lock() + defer s.mu.Unlock() + + var firstErr error + updateErr := func(err error) { + if err != nil && firstErr == nil { + firstErr = err + } + } + + // Flush from fatal down, in case there's trouble flushing. + for sev := logsink.Fatal; sev >= threshold; sev-- { + file := s.file[sev] + if file != nil { + updateErr(file.Flush()) + updateErr(file.Sync()) + } + } + + return firstErr +} + +// Names returns the names of the log files holding the FATAL, ERROR, +// WARNING, or INFO logs. Returns ErrNoLog if the log for the given +// level doesn't exist (e.g. because no messages of that level have been +// written). This may return multiple names if the log type requested +// has rolled over. +func Names(s string) ([]string, error) { + severity, err := logsink.ParseSeverity(s) + if err != nil { + return nil, err + } + + sinks.file.mu.Lock() + defer sinks.file.mu.Unlock() + f := sinks.file.file[severity] + if f == nil { + return nil, ErrNoLog + } + + return f.filenames(), nil +} diff --git a/glog_flags.go b/glog_flags.go new file mode 100644 index 000000000..3060e54d9 --- /dev/null +++ b/glog_flags.go @@ -0,0 +1,395 @@ +// Go support for leveled logs, analogous to https://github.com/google/glog. +// +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package glog + +import ( + "bytes" + "errors" + "flag" + "fmt" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + + "github.com/golang/glog/internal/logsink" +) + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + full bool // The pattern wants to match the full path + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(full, file string) bool { + if m.literal { + if m.full { + return full == m.pattern + } + return file == m.pattern + } + if m.full { + match, _ := filepath.Match(m.pattern, full) + return match + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// isFull reports whether the pattern matches the full file path, that is, +// whether it contains /. +func isFull(pattern string) bool { + return strings.ContainsRune(pattern, '/') +} + +// verboseFlags represents the setting of the -v and -vmodule flags. +type verboseFlags struct { + // moduleLevelCache is a sync.Map storing the -vmodule Level for each V() + // call site, identified by PC. If there is no matching -vmodule filter, + // the cached value is exactly v. 
moduleLevelCache is replaced with a new + // Map whenever the -vmodule or -v flag changes state. + moduleLevelCache atomic.Value + + // mu guards all fields below. + mu sync.Mutex + + // v stores the value of the -v flag. It may be read safely using + // sync.LoadInt32, but is only modified under mu. + v Level + + // module stores the parsed -vmodule flag. + module []modulePat + + // moduleLength caches len(module). If greater than zero, it + // means vmodule is enabled. It may be read safely using sync.LoadInt32, but + // is only modified under mu. + moduleLength int32 +} + +// NOTE: For compatibility with the open-sourced v1 version of this +// package (github.com/golang/glog) we need to retain that flag.Level +// implements the flag.Value interface. See also go/log-vs-glog. + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(l.Get().(Level)), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() any { + if l == &vflags.v { + // l is the value registered for the -v flag. + return Level(atomic.LoadInt32((*int32)(l))) + } + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + if l == &vflags.v { + // l is the value registered for the -v flag. + vflags.mu.Lock() + defer vflags.mu.Unlock() + vflags.moduleLevelCache.Store(&sync.Map{}) + atomic.StoreInt32((*int32)(l), int32(v)) + return nil + } + *l = Level(v) + return nil +} + +// vModuleFlag is the flag.Value for the --vmodule flag. +type vModuleFlag struct{ *verboseFlags } + +func (f vModuleFlag) String() string { + f.mu.Lock() + defer f.mu.Unlock() + + var b bytes.Buffer + for i, f := range f.module { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get returns nil for this flag type since the struct is not exported. +func (f vModuleFlag) Get() any { return nil } + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,foo/bar/baz=1,gfs*=3 +func (f vModuleFlag) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), isFull(pattern), Level(v)}) + } + + f.mu.Lock() + defer f.mu.Unlock() + f.module = filter + atomic.StoreInt32((*int32)(&f.moduleLength), int32(len(f.module))) + f.moduleLevelCache.Store(&sync.Map{}) + return nil +} + +func (f *verboseFlags) levelForPC(pc uintptr) Level { + if level, ok := f.moduleLevelCache.Load().(*sync.Map).Load(pc); ok { + return level.(Level) + } + + f.mu.Lock() + defer f.mu.Unlock() + level := Level(f.v) + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d for + // regular matches, /a/b/c/d for full matches. 
+ if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + full := file + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range f.module { + if filter.match(full, file) { + level = filter.level + break // Use the first matching level. + } + } + f.moduleLevelCache.Load().(*sync.Map).Store(pc, level) + return level +} + +func (f *verboseFlags) enabled(callerDepth int, level Level) bool { + if atomic.LoadInt32(&f.moduleLength) == 0 { + // No vmodule values specified, so compare against v level. + return Level(atomic.LoadInt32((*int32)(&f.v))) >= level + } + + pcs := [1]uintptr{} + if runtime.Callers(callerDepth+2, pcs[:]) < 1 { + return false + } + frame, _ := runtime.CallersFrames(pcs[:]).Next() + return f.levelForPC(frame.Entry) >= level +} + +// traceLocation represents an entry in the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +func parseTraceLocation(value string) (traceLocation, error) { + fields := strings.Split(value, ":") + if len(fields) != 2 { + return traceLocation{}, errTraceSyntax + } + file, lineStr := fields[0], fields[1] + if !strings.Contains(file, ".") { + return traceLocation{}, errTraceSyntax + } + line, err := strconv.Atoi(lineStr) + if err != nil { + return traceLocation{}, errTraceSyntax + } + if line < 0 { + return traceLocation{}, errors.New("negative value for line") + } + return traceLocation{file, line}, nil +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +func (t traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t traceLocation) String() string { + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// traceLocations represents the -log_backtrace_at flag. +// Syntax: -log_backtrace_at=recordio.go:234,sstable.go:456 +// Note that unlike vmodule the file extension is included here. +type traceLocations struct { + mu sync.Mutex + locsLen int32 // Safe for atomic read without mu. + locs []traceLocation +} + +func (t *traceLocations) String() string { + t.mu.Lock() + defer t.mu.Unlock() + + var buf bytes.Buffer + for i, tl := range t.locs { + if i > 0 { + buf.WriteString(",") + } + buf.WriteString(tl.String()) + } + return buf.String() +} + +// Get always returns nil for this flag type since the struct is not exported +func (t *traceLocations) Get() any { return nil } + +func (t *traceLocations) Set(value string) error { + var locs []traceLocation + for _, s := range strings.Split(value, ",") { + if s == "" { + continue + } + loc, err := parseTraceLocation(s) + if err != nil { + return err + } + locs = append(locs, loc) + } + + t.mu.Lock() + defer t.mu.Unlock() + atomic.StoreInt32(&t.locsLen, int32(len(locs))) + t.locs = locs + return nil +} + +func (t *traceLocations) match(file string, line int) bool { + if atomic.LoadInt32(&t.locsLen) == 0 { + return false + } + + t.mu.Lock() + defer t.mu.Unlock() + for _, tl := range t.locs { + if tl.match(file, line) { + return true + } + } + return false +} + +// severityFlag is an atomic flag.Value implementation for logsink.Severity. 
+type severityFlag int32 + +func (s *severityFlag) get() logsink.Severity { + return logsink.Severity(atomic.LoadInt32((*int32)(s))) +} +func (s *severityFlag) String() string { return strconv.FormatInt(int64(*s), 10) } +func (s *severityFlag) Get() any { return s.get() } +func (s *severityFlag) Set(value string) error { + threshold, err := logsink.ParseSeverity(value) + if err != nil { + // Not a severity name. Try a raw number. + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = logsink.Severity(v) + if threshold < logsink.Info || threshold > logsink.Fatal { + return fmt.Errorf("Severity %d out of range (min %d, max %d).", v, logsink.Info, logsink.Fatal) + } + } + atomic.StoreInt32((*int32)(s), int32(threshold)) + return nil +} + +var ( + vflags verboseFlags // The -v and -vmodule flags. + + logBacktraceAt traceLocations // The -log_backtrace_at flag. + + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + stderrThreshold severityFlag // The -stderrthreshold flag. +) + +// verboseEnabled returns whether the caller at the given depth should emit +// verbose logs at the given level, with depth 0 identifying the caller of +// verboseEnabled. +func verboseEnabled(callerDepth int, level Level) bool { + return vflags.enabled(callerDepth+1, level) +} + +// backtraceAt returns whether the logging call at the given function and line +// should also emit a backtrace of the current call stack. +func backtraceAt(file string, line int) bool { + return logBacktraceAt.match(file, line) +} + +func init() { + vflags.moduleLevelCache.Store(&sync.Map{}) + + flag.Var(&vflags.v, "v", "log level for V logs") + flag.Var(vModuleFlag{&vflags}, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + + flag.Var(&logBacktraceAt, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + stderrThreshold = severityFlag(logsink.Error) + + flag.BoolVar(&toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") +} diff --git a/glog_test.go b/glog_test.go index 0fb376e1f..81c43ce15 100644 --- a/glog_test.go +++ b/glog_test.go @@ -1,24 +1,10 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- package glog import ( "bytes" + "flag" "fmt" + "io/ioutil" stdLog "log" "path/filepath" "runtime" @@ -26,14 +12,17 @@ import ( "strings" "testing" "time" + + "github.com/golang/glog/internal/logsink" ) // Test that shortHostname works as advertised. func TestShortHostname(t *testing.T) { for hostname, expect := range map[string]string{ - "": "", - "host": "host", - "host.google.com": "host", + "": "", + "host": "host", + "host.google.com": "host", + "host.corp.google.com": "host", } { if got := shortHostname(hostname); expect != got { t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got) @@ -54,53 +43,57 @@ func (f *flushBuffer) Sync() error { return nil } +func (f *flushBuffer) filenames() []string { + return []string{""} +} + // swap sets the log writers and returns the old array. -func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) { - l.mu.Lock() - defer l.mu.Unlock() - old = l.file +func (s *fileSink) swap(writers severityWriters) (old severityWriters) { + s.mu.Lock() + defer s.mu.Unlock() + old = s.file for i, w := range writers { - logging.file[i] = w + s.file[i] = w } return } // newBuffers sets the log writers to all new byte buffers and returns the old array. -func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter { - return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) +func (s *fileSink) newBuffers() severityWriters { + return s.swap(severityWriters{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)}) } // contents returns the specified log value as a string. -func contents(s severity) string { - return logging.file[s].(*flushBuffer).String() +func contents(s logsink.Severity) string { + return sinks.file.file[s].(*flushBuffer).String() } // contains reports whether the string is contained in the log. -func contains(s severity, str string, t *testing.T) bool { +func contains(s logsink.Severity, str string, t *testing.T) bool { return strings.Contains(contents(s), str) } // setFlags configures the logging flags how the test expects them. func setFlags() { - logging.toStderr = false + toStderr = false } // Test that Info works as advertised. func TestInfo(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) + defer sinks.file.swap(sinks.file.newBuffers()) Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) + if !contains(logsink.Info, "I", t) { + t.Errorf("Info has wrong character: %q", contents(logsink.Info)) } - if !contains(infoLog, "test", t) { + if !contains(logsink.Info, "test", t) { t.Error("Info failed") } } func TestInfoDepth(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) + defer sinks.file.swap(sinks.file.newBuffers()) f := func() { InfoDepth(1, "depth-test1") } @@ -109,7 +102,7 @@ func TestInfoDepth(t *testing.T) { InfoDepth(0, "depth-test0") f() - msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n") + msgs := strings.Split(strings.TrimSuffix(contents(logsink.Info), "\n"), "\n") if len(msgs) != 2 { t.Fatalf("Got %d lines, expected 2", len(msgs)) } @@ -159,12 +152,12 @@ func TestCopyStandardLogToPanic(t *testing.T) { // Test that using the standard log package logs to INFO. 
func TestStandardLog(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) + defer sinks.file.swap(sinks.file.newBuffers()) stdLog.Print("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) + if !contains(logsink.Info, "I", t) { + t.Errorf("Info has wrong character: %q", contents(logsink.Info)) } - if !contains(infoLog, "test", t) { + if !contains(logsink.Info, "test", t) { t.Error("Info failed") } } @@ -172,25 +165,37 @@ func TestStandardLog(t *testing.T) { // Test that the header has the correct format. func TestHeader(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) defer func(previous func() time.Time) { timeNow = previous }(timeNow) timeNow = func() time.Time { return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) } + + oldPID := pid + defer func() { pid = oldPID }() pid = 1234 - Info("test") + + defer sinks.file.swap(sinks.file.newBuffers()) + + Info("testHeader") var line int - format := "I0102 15:04:05.067890 1234 glog_test.go:%d] test\n" - n, err := fmt.Sscanf(contents(infoLog), format, &line) - if n != 1 || err != nil { - t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) + format := "I0102 15:04:05.067890 %7d glog_test.go:%d] testHeader\n" + var gotPID int64 + n, err := fmt.Sscanf(contents(logsink.Info), format, &gotPID, &line) + if n != 2 || err != nil { + t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(logsink.Info)) + } + + if want := int64(pid); gotPID != want { + t.Errorf("expected log line to be logged with process ID %d, got %d", want, gotPID) } + // Scanf treats multiple spaces as equivalent to a single space, // so check for correct space-padding also. - want := fmt.Sprintf(format, line) - if contents(infoLog) != want { - t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) + want := fmt.Sprintf(format, gotPID, line) + if contents(logsink.Info) != want { + t.Errorf("log format error: got:\n\t%q\nwant:\n\t%q", contents(logsink.Info), want) } + } // Test that an Error log goes to Warning and Info. @@ -198,19 +203,19 @@ func TestHeader(t *testing.T) { // all be identical. func TestError(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) + defer sinks.file.swap(sinks.file.newBuffers()) Error("test") - if !contains(errorLog, "E", t) { - t.Errorf("Error has wrong character: %q", contents(errorLog)) + if !contains(logsink.Error, "E", t) { + t.Errorf("Error has wrong character: %q", contents(logsink.Error)) } - if !contains(errorLog, "test", t) { + if !contains(logsink.Error, "test", t) { t.Error("Error failed") } - str := contents(errorLog) - if !contains(warningLog, str, t) { + str := contents(logsink.Error) + if !contains(logsink.Warning, str, t) { t.Error("Warning failed") } - if !contains(infoLog, str, t) { + if !contains(logsink.Info, str, t) { t.Error("Info failed") } } @@ -220,16 +225,16 @@ func TestError(t *testing.T) { // all be identical. 
func TestWarning(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) + defer sinks.file.swap(sinks.file.newBuffers()) Warning("test") - if !contains(warningLog, "W", t) { - t.Errorf("Warning has wrong character: %q", contents(warningLog)) + if !contains(logsink.Warning, "W", t) { + t.Errorf("Warning has wrong character: %q", contents(logsink.Warning)) } - if !contains(warningLog, "test", t) { + if !contains(logsink.Warning, "test", t) { t.Error("Warning failed") } - str := contents(warningLog) - if !contains(infoLog, str, t) { + str := contents(logsink.Warning) + if !contains(logsink.Info, str, t) { t.Error("Info failed") } } @@ -237,24 +242,90 @@ func TestWarning(t *testing.T) { // Test that a V log goes to Info. func TestV(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) - logging.verbosity.Set("2") - defer logging.verbosity.Set("0") + defer sinks.file.swap(sinks.file.newBuffers()) + if err := flag.Lookup("v").Value.Set("2"); err != nil { + t.Fatalf("Failed to set -v=2: %v", err) + } + defer flag.Lookup("v").Value.Set("0") + V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) + if !contains(logsink.Info, "I", t) { + t.Errorf("Info has wrong character: %q", contents(logsink.Info)) } - if !contains(infoLog, "test", t) { + if !contains(logsink.Info, "test", t) { t.Error("Info failed") } } +// Test that updating -v at runtime, while -vmodule is set to a non-empty +// value, resets the modules cache correctly. +func TestVFlagUpdates(t *testing.T) { + setFlags() + defer sinks.file.swap(sinks.file.newBuffers()) + // Set -vmodule to some arbitrary value to make values read from cache. + // See log_flags.go:/func .* enabled/. + if err := flag.Lookup("vmodule").Value.Set("non_existent_module=3"); err != nil { + t.Fatalf("Failed to set -vmodule=log_test=3: %v", err) + } + defer flag.Lookup("vmodule").Value.Set("") + if err := flag.Lookup("v").Value.Set("3"); err != nil { + t.Fatalf("Failed to set -v=3: %v", err) + } + defer flag.Lookup("v").Value.Set("0") + + if !V(2) { + t.Error("V(2) not enabled for 2") + } + if !V(3) { + t.Error("V(3) not enabled for 3") + } + + // Setting a lower level should reset the modules cache. + if err := flag.Lookup("v").Value.Set("2"); err != nil { + t.Fatalf("Failed to set -v=2: %v", err) + } + if !V(2) { + t.Error("V(2) not enabled for 2") + } + if V(3) { + t.Error("V(3) enabled for 3") + } +} + +// Test that an arbitrary log.Level value does not modify -v. +func TestLevel(t *testing.T) { + setFlags() + defer sinks.file.swap(sinks.file.newBuffers()) + if err := flag.Lookup("v").Value.Set("3"); err != nil { + t.Fatalf("Failed to set -v=3: %v", err) + } + defer flag.Lookup("v").Value.Set("0") + + var l Level + if got, want := l.String(), "0"; got != want { + t.Errorf("l.String() = %q, want %q", got, want) + } + if err := l.Set("2"); err != nil { + t.Fatalf("l.Set(2) failed: %v", err) + } + if got, want := l.String(), "2"; got != want { + t.Errorf("l.String() = %q, want %q", got, want) + } + // -v flag should still be "3". + if got, want := flag.Lookup("v").Value.String(), "3"; got != want { + t.Errorf("-v=%v, want %v", got, want) + } +} + // Test that a vmodule enables a log in this file. 
func TestVmoduleOn(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("glog_test=2") - defer logging.vmodule.Set("") + defer sinks.file.swap(sinks.file.newBuffers()) + if err := flag.Lookup("vmodule").Value.Set("glog_test=2"); err != nil { + t.Fatalf("Failed to set -vmodule=log_test=2: %v", err) + } + defer flag.Lookup("vmodule").Value.Set("") + if !V(1) { t.Error("V not enabled for 1") } @@ -265,27 +336,67 @@ func TestVmoduleOn(t *testing.T) { t.Error("V enabled for 3") } V(2).Info("test") - if !contains(infoLog, "I", t) { - t.Errorf("Info has wrong character: %q", contents(infoLog)) + if !contains(logsink.Info, "I", t) { + t.Errorf("Info has wrong character: %q", contents(logsink.Info)) } - if !contains(infoLog, "test", t) { + if !contains(logsink.Info, "test", t) { t.Error("Info failed") } } +// Test that a VDepth calculates the depth correctly. +func TestVDepth(t *testing.T) { + setFlags() + defer sinks.file.swap(sinks.file.newBuffers()) + if err := flag.Lookup("vmodule").Value.Set("glog_test=3"); err != nil { + t.Fatalf("Failed to set -vmodule=glog_test=3: %v", err) + } + defer flag.Lookup("vmodule").Value.Set("") + + if !V(3) { + t.Error("V not enabled for 3") + } + if !VDepth(0, 2) { + t.Error("VDepth(0) not enabled for 2") + } + if !VDepth(0, 3) { + t.Error("VDepth(0) not enabled for 3") + } + if VDepth(0, 4) { + t.Error("VDepth(0) enabled for 4") + } + + // Since vmodule is set to glog_test=3, V(3) is true only for frames in + // glog_test. runInAnotherModule's stack frame is in log_vmodule_test, whereas + // this test and the provided closures are in glog_test. Therefore VDepth(0, 3) + // and VDepth(2, 3) are true, while VDepth(1, 3) is false. + if !runInAnotherModule(func() bool { return bool(VDepth(0, 3)) }) { + t.Error("VDepth(0) in closure not enabled for 3") + } + if runInAnotherModule(func() bool { return bool(VDepth(1, 3)) }) { + t.Error("VDepth(1) in closure enabled for 3") + } + if !runInAnotherModule(func() bool { return bool(VDepth(2, 3)) }) { + t.Error("VDepth(2) in closure not enabled for 3") + } +} + // Test that a vmodule of another file does not enable a log in this file. func TestVmoduleOff(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) - logging.vmodule.Set("notthisfile=2") - defer logging.vmodule.Set("") + defer sinks.file.swap(sinks.file.newBuffers()) + if err := flag.Lookup("vmodule").Value.Set("notthisfile=2"); err != nil { + t.Fatalf("Failed to set -vmodule=notthisfile=2: %v", err) + } + defer flag.Lookup("vmodule").Value.Set("") + for i := 1; i <= 3; i++ { if V(Level(i)) { t.Errorf("V enabled for %d", i) } } V(2).Info("test") - if contents(infoLog) != "" { + if contents(logsink.Info) != "" { t.Error("V logged incorrectly") } } @@ -310,10 +421,14 @@ var vGlobs = map[string]bool{ // Test that vmodule globbing works as advertised. func testVmoduleGlob(pat string, match bool, t *testing.T) { + t.Helper() setFlags() - defer logging.swap(logging.newBuffers()) - defer logging.vmodule.Set("") - logging.vmodule.Set(pat) + defer sinks.file.swap(sinks.file.newBuffers()) + if err := flag.Lookup("vmodule").Value.Set(pat); err != nil { + t.Errorf("Failed to set -vmodule=%s: %v", pat, err) + } + defer flag.Lookup("vmodule").Value.Set("") + if V(2) != Verbose(match) { t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match) } @@ -326,29 +441,39 @@ func TestVmoduleGlob(t *testing.T) { } } +// Test that a vmodule globbing on a full path works as advertised. 
+func TestVmoduleFullGlob(t *testing.T) { + _, file, _, _ := runtime.Caller(0) + for glob, match := range vGlobs { + testVmoduleGlob(filepath.Join(filepath.Dir(file), glob), match, t) + } +} + +// Test that a vmodule globbing across multiple directories works as advertised. +func TestVmoduleFullGlobMultipleDirectories(t *testing.T) { + // Note: only covering here what + // TestVmoduleGlob does not. + _, file, _, _ := runtime.Caller(0) + dir := filepath.Dir(filepath.Dir(file)) + testVmoduleGlob(filepath.Join(dir, "*/glog_test=2"), true, t) + testVmoduleGlob(filepath.Join(dir, "*/glog_????=2"), true, t) +} + +func logAtVariousLevels() { + V(3).Infof("level 3 message") + V(2).Infof("level 2 message") + V(1).Infof("level 1 message") + Infof("default level message") +} + func TestRollover(t *testing.T) { setFlags() - var err error - defer func(previous func(error)) { logExitFunc = previous }(logExitFunc) - logExitFunc = func(e error) { - err = e - } - defer func(previous uint64) { MaxSize = previous }(MaxSize) - MaxSize = 512 Info("x") // Be sure we have a file. - info, ok := logging.file[infoLog].(*syncBuffer) + info, ok := sinks.file.file[logsink.Info].(*syncBuffer) if !ok { t.Fatal("info wasn't created") } - if err != nil { - t.Fatalf("info has initial error: %v", err) - } - fname0 := info.file.Name() - Info(strings.Repeat("x", int(MaxSize))) // force a rollover - if err != nil { - t.Fatalf("info has error after big write: %v", err) - } // Make sure the next log file gets a file name with a different // time stamp. @@ -358,10 +483,25 @@ func TestRollover(t *testing.T) { // handle Daylight Savings Time properly). time.Sleep(1 * time.Second) - Info("x") // create a new file + // Measure the current size of the log file. + info.Flush() + fi, err := info.file.Stat() if err != nil { - t.Fatalf("error after rotation: %v", err) + t.Fatalf("Unable to stat log file %s: %v", info.file.Name(), err) } + + // Set MaxSize to a value that will accept one longMessage, but not two. + longMessage := strings.Repeat("x", 1024) + defer func(previous uint64) { MaxSize = previous }(MaxSize) + MaxSize = uint64(fi.Size()) + uint64(2*len(longMessage)) - 1 + + fname0 := info.file.Name() + + // Force a rotation. + Info(longMessage) + Info(longMessage) + info.Flush() + fname1 := info.file.Name() if fname0 == fname1 { t.Errorf("info.f.Name did not change: %v", fname0) @@ -369,11 +509,67 @@ func TestRollover(t *testing.T) { if info.nbytes >= MaxSize { t.Errorf("file size was not reset: %d", info.nbytes) } + + // Check to see if the original file has the continued footer. 
+ f0, err := ioutil.ReadFile(fname0) + if err != nil { + t.Fatalf("Unable to read file %s: %v", fname0, err) + } + if !bytes.HasSuffix(f0, []byte(footer)) { + t.Errorf("%v: Missing footer %q", fname0, footer) + } + found := false + for _, l := range bytes.Split(f0, []byte("\n")) { + var file string + _, err = fmt.Sscanf(string(l), "Next log: %s\n", &file) + if err != nil { + continue + } + if file != fname1 { + t.Errorf("%v: Wanted next filename %s, got %s", fname0, fname1, file) + } + found = true + } + if !found { + t.Errorf("%v: Next log footer not found", fname0) + } + + // Check to see if the previous file header is there in the new file + f1, err := ioutil.ReadFile(fname1) + if err != nil { + t.Fatalf("Unable to read file %s: %v", fname1, err) + } + found = false + for _, l := range bytes.Split(f1, []byte("\n")) { + var file string + _, err = fmt.Sscanf(string(l), "Previous log: %s\n", &file) + if err != nil { + continue + } + if file != fname0 { + t.Errorf("%v: Wanted previous filename %s, got %s", fname1, fname0, file) + } + found = true + } + if !found { + t.Errorf("%v: Previous log header not found", fname1) + } + + // Make sure Names returned the right names. + n, err := Names("INFO") + if len(n) != 2 && err != nil && n[0] != fname0 && n[1] != fname1 { + t.Errorf("Names(INFO) wanted [%s, %s]/nil, got %v/%v", fname0, fname1, n, err) + } + + if t.Failed() { + t.Logf("%v:\n%q", fname0, f0) + t.Logf("%v:\n%q", fname1, f1) + } } func TestLogBacktraceAt(t *testing.T) { setFlags() - defer logging.swap(logging.newBuffers()) + defer sinks.file.swap(sinks.file.newBuffers()) // The peculiar style of this code simplifies line counting and maintenance of the // tracing block below. var infoLine string @@ -383,7 +579,7 @@ func TestLogBacktraceAt(t *testing.T) { } _, file = filepath.Split(file) infoLine = fmt.Sprintf("%s:%d", file, line+delta) - err := logging.traceLocation.Set(infoLine) + err := logBacktraceAt.Set(infoLine) if err != nil { t.Fatal("error setting log_backtrace_at: ", err) } @@ -394,22 +590,92 @@ func TestLogBacktraceAt(t *testing.T) { setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. Info("we want a stack trace here") } - numAppearances := strings.Count(contents(infoLog), infoLine) + numAppearances := strings.Count(contents(logsink.Info), infoLine) + if numAppearances < 2 { + // Need 2 appearances, one in the log header and one in the trace: + // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here + // ... + // .../glog/glog_test.go:280 (0x41ba91) + // ... + // We could be more precise but that would require knowing the details + // of the traceback format, which may not be dependable. + t.Fatal("got no trace back; log is ", contents(logsink.Info)) + } +} + +func TestNewStandardLoggerLogBacktraceAt(t *testing.T) { + setFlags() + defer sinks.file.swap(sinks.file.newBuffers()) + s := NewStandardLogger("INFO") + // The peculiar style of this code simplifies line counting and maintenance of the + // tracing block below. + var infoLine string + setTraceLocation := func(file string, line int, ok bool, delta int) { + if !ok { + t.Fatal("could not get file:line") + } + _, file = filepath.Split(file) + infoLine = fmt.Sprintf("%s:%d", file, line+delta) + err := logBacktraceAt.Set(infoLine) + if err != nil { + t.Fatal("error setting log_backtrace_at: ", err) + } + } + { + // Start of tracing block. These lines know about each other's relative position. 
+ _, file, line, ok := runtime.Caller(0) + setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls. + s.Printf("we want a stack trace here") + } + infoContents := contents(logsink.Info) + if strings.Contains(infoContents, infoLine+"] [") { + t.Fatal("got extra bracketing around log line contents; log is ", infoContents) + } + numAppearances := strings.Count(infoContents, infoLine) if numAppearances < 2 { // Need 2 appearances, one in the log header and one in the trace: // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here // ... - // github.com/glog/glog_test.go:280 (0x41ba91) + // .../glog/glog_test.go:280 (0x41ba91) // ... // We could be more precise but that would require knowing the details // of the traceback format, which may not be dependable. - t.Fatal("got no trace back; log is ", contents(infoLog)) + t.Fatal("got no trace back; log is ", infoContents) } } -func BenchmarkHeader(b *testing.B) { - for i := 0; i < b.N; i++ { - buf, _, _ := logging.header(infoLog, 0) - logging.putBuffer(buf) +// Test to make sure the log naming function works properly. +func TestLogNames(t *testing.T) { + setFlags() + defer sinks.file.swap(sinks.file.newBuffers()) + n, e := Names("FOO") + if e == nil { + t.Errorf("Names(FOO) was %v/nil, should be []/error", n) + } + + // Set the infoLog to nil to simulate "log not yet written to" + h := sinks.file.file[logsink.Info] + sinks.file.file[logsink.Info] = nil + n, e = Names("INFO") + if e != ErrNoLog { + t.Errorf("Names(INFO) was %v/%v, should be [], ErrNoLog", n, e) + } + sinks.file.file[logsink.Info] = h + + // Get the name; testing has a fixed fake name for these. + Info("test") + n, e = Names("INFO") + if len(n) != 1 && n[0] != "" { + t.Errorf("Names(INFO) got %s, want ", n) + } +} + +func TestLogLength(t *testing.T) { + setFlags() + defer sinks.file.swap(sinks.file.newBuffers()) + Info(strings.Repeat("X", logsink.MaxLogMessageLen*2)) + if c := contents(logsink.Info); len(c) != logsink.MaxLogMessageLen { + t.Errorf("Info was not truncated: got length %d, want %d, contents %q", + len(c), logsink.MaxLogMessageLen, c) } } diff --git a/glog_vmodule_test.go b/glog_vmodule_test.go new file mode 100644 index 000000000..d483abdde --- /dev/null +++ b/glog_vmodule_test.go @@ -0,0 +1,10 @@ +package glog + +// runInAnotherModule is a simple wrapper that, being defined in another file, +// provides a different vmodule stack frame on the stack for use with +// glog.*Depth testing. +// +//go:noinline +func runInAnotherModule(f func() bool) bool { + return f() +} diff --git a/go.mod b/go.mod index 376dc05b9..40da259fa 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,3 @@ module github.com/golang/glog -go 1.11 +go 1.18 diff --git a/internal/logsink/logsink.go b/internal/logsink/logsink.go new file mode 100644 index 000000000..53758e1c9 --- /dev/null +++ b/internal/logsink/logsink.go @@ -0,0 +1,387 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package logsink + +import ( + "bytes" + "fmt" + "strconv" + "strings" + "sync" + "time" + + "github.com/golang/glog/internal/stackdump" +) + +// MaxLogMessageLen is the limit on length of a formatted log message, including +// the standard line prefix and trailing newline. +// +// Chosen to match C++ glog. +const MaxLogMessageLen = 15000 + +// A Severity is a severity at which a message can be logged. +type Severity int8 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + Info Severity = iota + Warning + Error + + // Fatal contains logs written immediately before the process terminates. + // + // Sink implementations should not terminate the process themselves: the log + // package will perform any necessary cleanup and terminate the process as + // appropriate. + Fatal +) + +func (s Severity) String() string { + switch s { + case Info: + return "INFO" + case Warning: + return "WARNING" + case Error: + return "ERROR" + case Fatal: + return "FATAL" + } + return fmt.Sprintf("%T(%d)", s, s) +} + +// ParseSeverity returns the case-insensitive Severity value for the given string. +func ParseSeverity(name string) (Severity, error) { + name = strings.ToUpper(name) + for s := Info; s <= Fatal; s++ { + if s.String() == name { + return s, nil + } + } + return -1, fmt.Errorf("logsink: invalid severity %q", name) +} + +// Meta is metadata about a logging call. +type Meta struct { + // Time is the time at which the log call was made. + Time time.Time + + // File is the source file from which the log entry originates. + File string + // Line is the line offset within the source file. + Line int + // Depth is the number of stack frames between the logsink and the log call. + Depth int + + Severity Severity + + // Verbose indicates whether the call was made via "log.V". Log entries below + // the current verbosity threshold are not sent to the sink. + Verbose bool + + // Thread ID. This can be populated with a thread ID from another source, + // such as a system we are importing logs from. In the normal case, this + // will be set to the process ID (PID), since Go doesn't have threads. + Thread int64 + + // Stack trace starting in the logging function. May be nil. + // A logsink should implement the StackWanter interface to request this. + // + // Even if WantStack returns false, this field may be set (e.g. if another + // sink wants a stack trace). + Stack *stackdump.Stack +} + +// Structured is a logging destination that accepts structured data as input. +type Structured interface { + // Printf formats according to a fmt.Printf format specifier and writes a log + // entry. The precise result of formatting depends on the sink, but should + // aim for consistency with fmt.Printf. + // + // Printf returns the number of bytes occupied by the log entry, which + // may not be equal to the total number of bytes written. + // + // Printf returns any error encountered *if* it is severe enough that the log + // package should terminate the process. + // + // The sink must not modify the *Meta parameter, nor reference it after + // Printf has returned: it may be reused in subsequent calls. + Printf(meta *Meta, format string, a ...any) (n int, err error) +} + +// StackWanter can be implemented by a logsink.Structured to indicate that it +// wants a stack trace to accompany at least some of the log messages it receives. 
+type StackWanter interface { + // WantStack returns true if the sink requires a stack trace for a log message + // with this metadata. + // + // NOTE: Returning true implies that meta.Stack will be non-nil. Returning + // false does NOT imply that meta.Stack will be nil. + WantStack(meta *Meta) bool +} + +// Text is a logging destination that accepts pre-formatted log lines (instead of +// structured data). +type Text interface { + // Enabled returns whether this sink should output messages for the given + // Meta. If the sink returns false for a given Meta, the Printf function will + // not call Emit on it for the corresponding log message. + Enabled(*Meta) bool + + // Emit writes a pre-formatted text log entry (including any applicable + // header) to the log. It returns the number of bytes occupied by the entry + // (which may differ from the length of the passed-in slice). + // + // Emit returns any error encountered *if* it is severe enough that the log + // package should terminate the process. + // + // The sink must not modify the *Meta parameter, nor reference it after + // Printf has returned: it may be reused in subsequent calls. + // + // NOTE: When developing a text sink, keep in mind the surface in which the + // logs will be displayed, and whether it's important that the sink be + // resistent to tampering in the style of b/211428300. Standard text sinks + // (like `stderrSink`) do not protect against this (e.g. by escaping + // characters) because the cases where they would show user-influenced bytes + // are vanishingly small. + Emit(*Meta, []byte) (n int, err error) +} + +// bufs is a pool of *bytes.Buffer used in formatting log entries. +var bufs sync.Pool // Pool of *bytes.Buffer. + +// textPrintf formats a text log entry and emits it to all specified Text sinks. +// +// The returned n is the maximum across all Emit calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func textPrintf(m *Meta, textSinks []Text, format string, args ...any) (n int, err error) { + // We expect at most file, stderr, and perhaps syslog. If there are more, + // we'll end up allocating - no big deal. + const maxExpectedTextSinks = 3 + var noAllocSinks [maxExpectedTextSinks]Text + + sinks := noAllocSinks[:0] + for _, s := range textSinks { + if s.Enabled(m) { + sinks = append(sinks, s) + } + } + if len(sinks) == 0 && m.Severity != Fatal { + return 0, nil // No TextSinks specified; don't bother formatting. + } + + bufi := bufs.Get() + var buf *bytes.Buffer + if bufi == nil { + buf = bytes.NewBuffer(nil) + bufi = buf + } else { + buf = bufi.(*bytes.Buffer) + buf.Reset() + } + + // Lmmdd hh:mm:ss.uuuuuu PID/GID file:line] + // + // The "PID" entry arguably ought to be TID for consistency with other + // environments, but TID is not meaningful in a Go program due to the + // multiplexing of goroutines across threads. + // + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. 
+ const severityChar = "IWEF" + buf.WriteByte(severityChar[m.Severity]) + + _, month, day := m.Time.Date() + hour, minute, second := m.Time.Clock() + twoDigits(buf, int(month)) + twoDigits(buf, day) + buf.WriteByte(' ') + twoDigits(buf, hour) + buf.WriteByte(':') + twoDigits(buf, minute) + buf.WriteByte(':') + twoDigits(buf, second) + buf.WriteByte('.') + nDigits(buf, 6, uint64(m.Time.Nanosecond()/1000), '0') + buf.WriteByte(' ') + + nDigits(buf, 7, uint64(m.Thread), ' ') + buf.WriteByte(' ') + + { + file := m.File + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + buf.WriteString(file) + } + + buf.WriteByte(':') + { + var tmp [19]byte + buf.Write(strconv.AppendInt(tmp[:0], int64(m.Line), 10)) + } + buf.WriteString("] ") + + msgStart := buf.Len() + fmt.Fprintf(buf, format, args...) + if buf.Len() > MaxLogMessageLen-1 { + buf.Truncate(MaxLogMessageLen - 1) + } + msgEnd := buf.Len() + if b := buf.Bytes(); b[len(b)-1] != '\n' { + buf.WriteByte('\n') + } + + for _, s := range sinks { + sn, sErr := s.Emit(m, buf.Bytes()) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + + if m.Severity == Fatal { + savedM := *m + fatalMessageStore(savedEntry{ + meta: &savedM, + msg: buf.Bytes()[msgStart:msgEnd], + }) + } else { + bufs.Put(bufi) + } + return n, err +} + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer to buf. +func twoDigits(buf *bytes.Buffer, d int) { + buf.WriteByte(digits[(d/10)%10]) + buf.WriteByte(digits[d%10]) +} + +// nDigits formats an n-digit integer to buf, padding with pad on the left. It +// assumes d != 0. +func nDigits(buf *bytes.Buffer, n int, d uint64, pad byte) { + var tmp [20]byte + + cutoff := len(tmp) - n + j := len(tmp) - 1 + for ; d > 0; j-- { + tmp[j] = digits[d%10] + d /= 10 + } + for ; j >= cutoff; j-- { + tmp[j] = pad + } + j++ + buf.Write(tmp[j:]) +} + +// Printf writes a log entry to all registered TextSinks in this package, then +// to all registered StructuredSinks. +// +// The returned n is the maximum across all Emit and Printf calls. +// The returned err is the first non-nil error encountered. +// Sinks that are disabled by configuration should return (0, nil). +func Printf(m *Meta, format string, args ...any) (n int, err error) { + m.Depth++ + n, err = textPrintf(m, TextSinks, format, args...) + + for _, sink := range StructuredSinks { + // TODO: Support TextSinks that implement StackWanter? + if sw, ok := sink.(StackWanter); ok && sw.WantStack(m) { + if m.Stack == nil { + // First, try to find a stacktrace in args, otherwise generate one. + for _, arg := range args { + if stack, ok := arg.(stackdump.Stack); ok { + m.Stack = &stack + break + } + } + if m.Stack == nil { + stack := stackdump.Caller( /* skipDepth = */ m.Depth) + m.Stack = &stack + } + } + } + sn, sErr := sink.Printf(m, format, args...) + if sn > n { + n = sn + } + if sErr != nil && err == nil { + err = sErr + } + } + return n, err +} + +// The sets of sinks to which logs should be written. +// +// These must only be modified during package init, and are read-only thereafter. +var ( + // StructuredSinks is the set of Structured sink instances to which logs + // should be written. + StructuredSinks []Structured + + // TextSinks is the set of Text sink instances to which logs should be + // written. + // + // These are registered separately from Structured sink implementations to + // avoid the need to repeat the work of formatting a message for each Text + // sink that writes it. 
The package-level Printf function writes to both sets + // independenty, so a given log destination should only register a Structured + // *or* a Text sink (not both). + TextSinks []Text +) + +type savedEntry struct { + meta *Meta + msg []byte +} + +// StructuredTextWrapper is a Structured sink which forwards logs to a set of Text sinks. +// +// The purpose of this sink is to allow applications to intercept logging calls before they are +// serialized and sent to Text sinks. For example, if one needs to redact PII from logging +// arguments before they reach STDERR, one solution would be to do the redacting in a Structured +// sink that forwards logs to a StructuredTextWrapper instance, and make STDERR a child of that +// StructuredTextWrapper instance. This is how one could set this up in their application: +// +// func init() { +// +// wrapper := logsink.StructuredTextWrapper{TextSinks: logsink.TextSinks} +// // sanitizersink will intercept logs and remove PII +// sanitizer := sanitizersink{Sink: &wrapper} +// logsink.StructuredSinks = append(logsink.StructuredSinks, &sanitizer) +// logsink.TextSinks = nil +// +// } +type StructuredTextWrapper struct { + // TextSinks is the set of Text sinks that should receive logs from this + // StructuredTextWrapper instance. + TextSinks []Text +} + +// Printf forwards logs to all Text sinks registered in the StructuredTextWrapper. +func (w *StructuredTextWrapper) Printf(meta *Meta, format string, args ...any) (n int, err error) { + return textPrintf(meta, w.TextSinks, format, args...) +} diff --git a/internal/logsink/logsink_fatal.go b/internal/logsink/logsink_fatal.go new file mode 100644 index 000000000..3dc269abc --- /dev/null +++ b/internal/logsink/logsink_fatal.go @@ -0,0 +1,35 @@ +package logsink + +import ( + "sync/atomic" + "unsafe" +) + +func fatalMessageStore(e savedEntry) { + // Only put a new one in if we haven't assigned before. + atomic.CompareAndSwapPointer(&fatalMessage, nil, unsafe.Pointer(&e)) +} + +var fatalMessage unsafe.Pointer // savedEntry stored with CompareAndSwapPointer + +// FatalMessage returns the Meta and message contents of the first message +// logged with Fatal severity, or false if none has occurred. +func FatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(atomic.LoadPointer(&fatalMessage)) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} + +// DoNotUseRacyFatalMessage is FatalMessage, but worse. +// +//go:norace +//go:nosplit +func DoNotUseRacyFatalMessage() (*Meta, []byte, bool) { + e := (*savedEntry)(fatalMessage) + if e == nil { + return nil, nil, false + } + return e.meta, e.msg, true +} diff --git a/internal/stackdump/stackdump.go b/internal/stackdump/stackdump.go new file mode 100644 index 000000000..3427c9d6b --- /dev/null +++ b/internal/stackdump/stackdump.go @@ -0,0 +1,127 @@ +// Copyright 2023 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package stackdump provides wrappers for runtime.Stack and runtime.Callers +// with uniform support for skipping caller frames. +// +// ⚠ Unlike the functions in the runtime package, these may allocate a +// non-trivial quantity of memory: use them with care. ⚠ +package stackdump + +import ( + "bytes" + "runtime" +) + +// runtimeStackSelfFrames is 1 if runtime.Stack includes the call to +// runtime.Stack itself or 0 if it does not. +// +// As of 2016-04-27, the gccgo compiler includes runtime.Stack but the gc +// compiler does not. +var runtimeStackSelfFrames = func() int { + for n := 1 << 10; n < 1<<20; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if bytes.Contains(buf[:n], []byte("runtime.Stack")) { + return 1 + } else if n < len(buf) || bytes.Count(buf, []byte("\n")) >= 3 { + return 0 + } + } + return 0 +}() + +// Stack is a stack dump for a single goroutine. +type Stack struct { + // Text is a representation of the stack dump in a human-readable format. + Text []byte + + // PC is a representation of the stack dump using raw program counter values. + PC []uintptr +} + +func (s Stack) String() string { return string(s.Text) } + +// Caller returns the Stack dump for the calling goroutine, starting skipDepth +// frames before the caller of Caller. (Caller(0) provides a dump starting at +// the caller of this function.) +func Caller(skipDepth int) Stack { + return Stack{ + Text: CallerText(skipDepth + 1), + PC: CallerPC(skipDepth + 1), + } +} + +// CallerText returns a textual dump of the stack starting skipDepth frames before +// the caller. (CallerText(0) provides a dump starting at the caller of this +// function.) +func CallerText(skipDepth int) []byte { + for n := 1 << 10; ; n *= 2 { + buf := make([]byte, n) + n := runtime.Stack(buf, false) + if n < len(buf) { + return pruneFrames(skipDepth+1+runtimeStackSelfFrames, buf[:n]) + } + } +} + +// CallerPC returns a dump of the program counters of the stack starting +// skipDepth frames before the caller. (CallerPC(0) provides a dump starting at +// the caller of this function.) +func CallerPC(skipDepth int) []uintptr { + for n := 1 << 8; ; n *= 2 { + buf := make([]uintptr, n) + n := runtime.Callers(skipDepth+2, buf) + if n < len(buf) { + return buf[:n] + } + } +} + +// pruneFrames removes the topmost skipDepth frames of the first goroutine in a +// textual stack dump. It overwrites the passed-in slice. +// +// If there are fewer than skipDepth frames in the first goroutine's stack, +// pruneFrames prunes it to an empty stack and leaves the remaining contents +// intact. +func pruneFrames(skipDepth int, stack []byte) []byte { + headerLen := 0 + for i, c := range stack { + if c == '\n' { + headerLen = i + 1 + break + } + } + if headerLen == 0 { + return stack // No header line - not a well-formed stack trace. 
+ } + + skipLen := headerLen + skipNewlines := skipDepth * 2 + for ; skipLen < len(stack) && skipNewlines > 0; skipLen++ { + c := stack[skipLen] + if c != '\n' { + continue + } + skipNewlines-- + skipLen++ + if skipNewlines == 0 || skipLen == len(stack) || stack[skipLen] == '\n' { + break + } + } + + pruned := stack[skipLen-headerLen:] + copy(pruned, stack[:headerLen]) + return pruned +} diff --git a/internal/stackdump/stackdump_test.go b/internal/stackdump/stackdump_test.go new file mode 100644 index 000000000..7f782ca93 --- /dev/null +++ b/internal/stackdump/stackdump_test.go @@ -0,0 +1,152 @@ +// stackdump_test checks that the heuristics the stackdump package applies to +// prune frames work as expected in production Go compilers. + +package stackdump_test + +import ( + "bytes" + "fmt" + "regexp" + "runtime" + "testing" + + "github.com/golang/glog/internal/stackdump" +) + +var file string + +func init() { + _, file, _, _ = runtime.Caller(0) +} + +func TestCallerText(t *testing.T) { + stack := stackdump.CallerText(0) + _, _, line, _ := runtime.Caller(0) + line-- + + wantRE := regexp.MustCompile(fmt.Sprintf( + `^goroutine \d+ \[running\]: +github.com/golang/glog/internal/stackdump_test\.TestCallerText(\([^)]*\))? + %v:%v.* +`, file, line)) + if !wantRE.Match(stack) { + t.Errorf("Stack dump:\n%s\nwant matching regexp:\n%s", stack, wantRE.String()) + + buf := make([]byte, len(stack)*2) + origStack := buf[:runtime.Stack(buf, false)] + t.Logf("Unpruned stack:\n%s", origStack) + } +} + +func callerAt(calls int, depth int) (stack []byte) { + if calls == 1 { + return stackdump.CallerText(depth) + } + return callerAt(calls-1, depth) +} + +func TestCallerTextSkip(t *testing.T) { + const calls = 3 + cases := []struct { + depth int + callerAtFrames int + wantEndOfStack bool + }{ + {depth: 0, callerAtFrames: calls}, + {depth: calls - 1, callerAtFrames: 1}, + {depth: calls, callerAtFrames: 0}, + {depth: calls + 1, callerAtFrames: 0}, + {depth: calls + 100, wantEndOfStack: true}, + } + + for _, tc := range cases { + stack := callerAt(calls, tc.depth) + + wantREBuf := bytes.NewBuffer(nil) + fmt.Fprintf(wantREBuf, `^goroutine \d+ \[running\]: +`) + if tc.wantEndOfStack { + fmt.Fprintf(wantREBuf, "\n|$") + } else { + for n := tc.callerAtFrames; n > 0; n-- { + fmt.Fprintf(wantREBuf, `github.com/golang/glog/internal/stackdump_test\.callerAt(\([^)]*\))? + %v:\d+.* +`, file) + } + + if tc.depth <= calls { + fmt.Fprintf(wantREBuf, `github.com/golang/glog/internal/stackdump_test\.TestCallerTextSkip(\([^)]*\))? + %v:\d+.* +`, file) + } + } + + wantRE := regexp.MustCompile(wantREBuf.String()) + + if !wantRE.Match(stack) { + t.Errorf("for %v calls, stackdump.CallerText(%v) =\n%s\n\nwant matching regexp:\n%s", calls, tc.depth, stack, wantRE.String()) + } + } +} + +func pcAt(calls int, depth int) (stack []uintptr) { + if calls == 1 { + return stackdump.CallerPC(depth) + } + stack = pcAt(calls-1, depth) + runtime.Gosched() // Thwart tail-call optimization. 
+ return stack +} + +func TestCallerPC(t *testing.T) { + const calls = 3 + cases := []struct { + depth int + pcAtFrames int + wantEndOfStack bool + }{ + {depth: 0, pcAtFrames: calls}, + {depth: calls - 1, pcAtFrames: 1}, + {depth: calls, pcAtFrames: 0}, + {depth: calls + 1, pcAtFrames: 0}, + {depth: calls + 100, wantEndOfStack: true}, + } + + for _, tc := range cases { + stack := pcAt(calls, tc.depth) + if tc.wantEndOfStack { + if len(stack) != 0 { + t.Errorf("for %v calls, stackdump.CallerPC(%v) =\n%q\nwant []", calls, tc.depth, stack) + } + continue + } + + wantFuncs := []string{} + for n := tc.pcAtFrames; n > 0; n-- { + wantFuncs = append(wantFuncs, `github.com/golang/glog/internal/stackdump_test\.pcAt$`) + } + if tc.depth <= calls { + wantFuncs = append(wantFuncs, `^github.com/golang/glog/internal/stackdump_test\.TestCallerPC$`) + } + + gotFuncs := []string{} + for _, pc := range stack { + gotFuncs = append(gotFuncs, runtime.FuncForPC(pc).Name()) + } + if len(gotFuncs) > len(wantFuncs) { + gotFuncs = gotFuncs[:len(wantFuncs)] + } + + ok := true + for i, want := range wantFuncs { + re := regexp.MustCompile(want) + if i >= len(gotFuncs) || !re.MatchString(gotFuncs[i]) { + ok = false + break + } + } + if !ok { + t.Errorf("for %v calls, stackdump.CallerPC(%v) =\n%q\nwant %q", calls, tc.depth, gotFuncs, wantFuncs) + } + } +}
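[Editor's note, not part of the patch: a small usage sketch of the newly exported surface exercised by the tests above (glog.VDepth, glog.NewStandardLogger, glog.Names/glog.ErrNoLog, and the full-path -vmodule syntax). The module path and flag values are illustrative only, and error handling is trimmed.]

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// Normally supplied on the command line. A pattern containing '/' uses
	// the new full-path form and is matched against the whole file path
	// (without the ".go" suffix); the path here is purely illustrative.
	flag.Set("vmodule", "/path/to/myproj/server=2")
	flag.Parse()
	defer glog.Flush()

	glog.Infof("starting up")

	// VDepth is V with an explicit caller depth: VDepth(0, 2) behaves like
	// V(2) at the call site.
	if glog.VDepth(0, 2) {
		glog.Info("verbose detail")
	}

	// NewStandardLogger("INFO") yields a logger whose Printf writes to the
	// INFO log, as exercised by TestNewStandardLoggerLogBacktraceAt.
	glog.NewStandardLogger("INFO").Printf("via the standard log API")

	// Names reports the file names backing a severity; ErrNoLog means no
	// log file has been created yet for that severity.
	if names, err := glog.Names("INFO"); err == nil {
		glog.Infof("INFO log file(s): %v", names)
	} else if err == glog.ErrNoLog {
		glog.Warning("no INFO log file created yet")
	}
}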