diff --git a/vendor/github.com/armon/go-socks5/LICENSE b/vendor/github.com/armon/go-socks5/LICENSE new file mode 100644 index 0000000000..a5df10e675 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014 Armon Dadgar + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/armon/go-socks5/auth.go b/vendor/github.com/armon/go-socks5/auth.go new file mode 100644 index 0000000000..7811e2aaf0 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/auth.go @@ -0,0 +1,151 @@ +package socks5 + +import ( + "fmt" + "io" +) + +const ( + NoAuth = uint8(0) + noAcceptable = uint8(255) + UserPassAuth = uint8(2) + userAuthVersion = uint8(1) + authSuccess = uint8(0) + authFailure = uint8(1) +) + +var ( + UserAuthFailed = fmt.Errorf("User authentication failed") + NoSupportedAuth = fmt.Errorf("No supported authentication mechanism") +) + +// A Request encapsulates authentication state provided +// during negotiation +type AuthContext struct { + // Provided auth method + Method uint8 + // Payload provided during negotiation. + // Keys depend on the used auth method. 
+ // For UserPassauth contains Username + Payload map[string]string +} + +type Authenticator interface { + Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) + GetCode() uint8 +} + +// NoAuthAuthenticator is used to handle the "No Authentication" mode +type NoAuthAuthenticator struct{} + +func (a NoAuthAuthenticator) GetCode() uint8 { + return NoAuth +} + +func (a NoAuthAuthenticator) Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) { + _, err := writer.Write([]byte{socks5Version, NoAuth}) + return &AuthContext{NoAuth, nil}, err +} + +// UserPassAuthenticator is used to handle username/password based +// authentication +type UserPassAuthenticator struct { + Credentials CredentialStore +} + +func (a UserPassAuthenticator) GetCode() uint8 { + return UserPassAuth +} + +func (a UserPassAuthenticator) Authenticate(reader io.Reader, writer io.Writer) (*AuthContext, error) { + // Tell the client to use user/pass auth + if _, err := writer.Write([]byte{socks5Version, UserPassAuth}); err != nil { + return nil, err + } + + // Get the version and username length + header := []byte{0, 0} + if _, err := io.ReadAtLeast(reader, header, 2); err != nil { + return nil, err + } + + // Ensure we are compatible + if header[0] != userAuthVersion { + return nil, fmt.Errorf("Unsupported auth version: %v", header[0]) + } + + // Get the user name + userLen := int(header[1]) + user := make([]byte, userLen) + if _, err := io.ReadAtLeast(reader, user, userLen); err != nil { + return nil, err + } + + // Get the password length + if _, err := reader.Read(header[:1]); err != nil { + return nil, err + } + + // Get the password + passLen := int(header[0]) + pass := make([]byte, passLen) + if _, err := io.ReadAtLeast(reader, pass, passLen); err != nil { + return nil, err + } + + // Verify the password + if a.Credentials.Valid(string(user), string(pass)) { + if _, err := writer.Write([]byte{userAuthVersion, authSuccess}); err != nil { + return nil, err + } + } else { + if _, err := writer.Write([]byte{userAuthVersion, authFailure}); err != nil { + return nil, err + } + return nil, UserAuthFailed + } + + // Done + return &AuthContext{UserPassAuth, map[string]string{"Username": string(user)}}, nil +} + +// authenticate is used to handle connection authentication +func (s *Server) authenticate(conn io.Writer, bufConn io.Reader) (*AuthContext, error) { + // Get the methods + methods, err := readMethods(bufConn) + if err != nil { + return nil, fmt.Errorf("Failed to get auth methods: %v", err) + } + + // Select a usable method + for _, method := range methods { + cator, found := s.authMethods[method] + if found { + return cator.Authenticate(bufConn, conn) + } + } + + // No usable method found + return nil, noAcceptableAuth(conn) +} + +// noAcceptableAuth is used to handle when we have no eligible +// authentication mechanism +func noAcceptableAuth(conn io.Writer) error { + conn.Write([]byte{socks5Version, noAcceptable}) + return NoSupportedAuth +} + +// readMethods is used to read the number of methods +// and proceeding auth methods +func readMethods(r io.Reader) ([]byte, error) { + header := []byte{0} + if _, err := r.Read(header); err != nil { + return nil, err + } + + numMethods := int(header[0]) + methods := make([]byte, numMethods) + _, err := io.ReadAtLeast(r, methods, numMethods) + return methods, err +} diff --git a/vendor/github.com/armon/go-socks5/credentials.go b/vendor/github.com/armon/go-socks5/credentials.go new file mode 100644 index 0000000000..966642738e --- /dev/null +++ 
b/vendor/github.com/armon/go-socks5/credentials.go @@ -0,0 +1,17 @@ +package socks5 + +// CredentialStore is used to support user/pass authentication +type CredentialStore interface { + Valid(user, password string) bool +} + +// StaticCredentials enables using a map directly as a credential store +type StaticCredentials map[string]string + +func (s StaticCredentials) Valid(user, password string) bool { + pass, ok := s[user] + if !ok { + return false + } + return password == pass +} diff --git a/vendor/github.com/armon/go-socks5/request.go b/vendor/github.com/armon/go-socks5/request.go new file mode 100644 index 0000000000..b615fcbede --- /dev/null +++ b/vendor/github.com/armon/go-socks5/request.go @@ -0,0 +1,364 @@ +package socks5 + +import ( + "fmt" + "io" + "net" + "strconv" + "strings" + + "golang.org/x/net/context" +) + +const ( + ConnectCommand = uint8(1) + BindCommand = uint8(2) + AssociateCommand = uint8(3) + ipv4Address = uint8(1) + fqdnAddress = uint8(3) + ipv6Address = uint8(4) +) + +const ( + successReply uint8 = iota + serverFailure + ruleFailure + networkUnreachable + hostUnreachable + connectionRefused + ttlExpired + commandNotSupported + addrTypeNotSupported +) + +var ( + unrecognizedAddrType = fmt.Errorf("Unrecognized address type") +) + +// AddressRewriter is used to rewrite a destination transparently +type AddressRewriter interface { + Rewrite(ctx context.Context, request *Request) (context.Context, *AddrSpec) +} + +// AddrSpec is used to return the target AddrSpec +// which may be specified as IPv4, IPv6, or a FQDN +type AddrSpec struct { + FQDN string + IP net.IP + Port int +} + +func (a *AddrSpec) String() string { + if a.FQDN != "" { + return fmt.Sprintf("%s (%s):%d", a.FQDN, a.IP, a.Port) + } + return fmt.Sprintf("%s:%d", a.IP, a.Port) +} + +// Address returns a string suitable to dial; prefer returning IP-based +// address, fallback to FQDN +func (a AddrSpec) Address() string { + if 0 != len(a.IP) { + return net.JoinHostPort(a.IP.String(), strconv.Itoa(a.Port)) + } + return net.JoinHostPort(a.FQDN, strconv.Itoa(a.Port)) +} + +// A Request represents request received by a server +type Request struct { + // Protocol version + Version uint8 + // Requested command + Command uint8 + // AuthContext provided during negotiation + AuthContext *AuthContext + // AddrSpec of the the network that sent the request + RemoteAddr *AddrSpec + // AddrSpec of the desired destination + DestAddr *AddrSpec + // AddrSpec of the actual destination (might be affected by rewrite) + realDestAddr *AddrSpec + bufConn io.Reader +} + +type conn interface { + Write([]byte) (int, error) + RemoteAddr() net.Addr +} + +// NewRequest creates a new Request from the tcp connection +func NewRequest(bufConn io.Reader) (*Request, error) { + // Read the version byte + header := []byte{0, 0, 0} + if _, err := io.ReadAtLeast(bufConn, header, 3); err != nil { + return nil, fmt.Errorf("Failed to get command version: %v", err) + } + + // Ensure we are compatible + if header[0] != socks5Version { + return nil, fmt.Errorf("Unsupported command version: %v", header[0]) + } + + // Read in the destination address + dest, err := readAddrSpec(bufConn) + if err != nil { + return nil, err + } + + request := &Request{ + Version: socks5Version, + Command: header[1], + DestAddr: dest, + bufConn: bufConn, + } + + return request, nil +} + +// handleRequest is used for request processing after authentication +func (s *Server) handleRequest(req *Request, conn conn) error { + ctx := context.Background() + + // Resolve the address 
if we have a FQDN + dest := req.DestAddr + if dest.FQDN != "" { + ctx_, addr, err := s.config.Resolver.Resolve(ctx, dest.FQDN) + if err != nil { + if err := sendReply(conn, hostUnreachable, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Failed to resolve destination '%v': %v", dest.FQDN, err) + } + ctx = ctx_ + dest.IP = addr + } + + // Apply any address rewrites + req.realDestAddr = req.DestAddr + if s.config.Rewriter != nil { + ctx, req.realDestAddr = s.config.Rewriter.Rewrite(ctx, req) + } + + // Switch on the command + switch req.Command { + case ConnectCommand: + return s.handleConnect(ctx, conn, req) + case BindCommand: + return s.handleBind(ctx, conn, req) + case AssociateCommand: + return s.handleAssociate(ctx, conn, req) + default: + if err := sendReply(conn, commandNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Unsupported command: %v", req.Command) + } +} + +// handleConnect is used to handle a connect command +func (s *Server) handleConnect(ctx context.Context, conn conn, req *Request) error { + // Check if this is allowed + if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok { + if err := sendReply(conn, ruleFailure, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Connect to %v blocked by rules", req.DestAddr) + } else { + ctx = ctx_ + } + + // Attempt to connect + dial := s.config.Dial + if dial == nil { + dial = func(ctx context.Context, net_, addr string) (net.Conn, error) { + return net.Dial(net_, addr) + } + } + target, err := dial(ctx, "tcp", req.realDestAddr.Address()) + if err != nil { + msg := err.Error() + resp := hostUnreachable + if strings.Contains(msg, "refused") { + resp = connectionRefused + } else if strings.Contains(msg, "network is unreachable") { + resp = networkUnreachable + } + if err := sendReply(conn, resp, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Connect to %v failed: %v", req.DestAddr, err) + } + defer target.Close() + + // Send success + local := target.LocalAddr().(*net.TCPAddr) + bind := AddrSpec{IP: local.IP, Port: local.Port} + if err := sendReply(conn, successReply, &bind); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + + // Start proxying + errCh := make(chan error, 2) + go proxy(target, req.bufConn, errCh) + go proxy(conn, target, errCh) + + // Wait + for i := 0; i < 2; i++ { + e := <-errCh + if e != nil { + // return from this function closes target (and conn). 
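+ // errCh is buffered (capacity 2), so the second proxy goroutine can still deliver its error and exit after both connections are closed; that error is ignored.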
+ return e + } + } + return nil +} + +// handleBind is used to handle a connect command +func (s *Server) handleBind(ctx context.Context, conn conn, req *Request) error { + // Check if this is allowed + if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok { + if err := sendReply(conn, ruleFailure, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Bind to %v blocked by rules", req.DestAddr) + } else { + ctx = ctx_ + } + + // TODO: Support bind + if err := sendReply(conn, commandNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return nil +} + +// handleAssociate is used to handle a connect command +func (s *Server) handleAssociate(ctx context.Context, conn conn, req *Request) error { + // Check if this is allowed + if ctx_, ok := s.config.Rules.Allow(ctx, req); !ok { + if err := sendReply(conn, ruleFailure, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return fmt.Errorf("Associate to %v blocked by rules", req.DestAddr) + } else { + ctx = ctx_ + } + + // TODO: Support associate + if err := sendReply(conn, commandNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + return nil +} + +// readAddrSpec is used to read AddrSpec. +// Expects an address type byte, follwed by the address and port +func readAddrSpec(r io.Reader) (*AddrSpec, error) { + d := &AddrSpec{} + + // Get the address type + addrType := []byte{0} + if _, err := r.Read(addrType); err != nil { + return nil, err + } + + // Handle on a per type basis + switch addrType[0] { + case ipv4Address: + addr := make([]byte, 4) + if _, err := io.ReadAtLeast(r, addr, len(addr)); err != nil { + return nil, err + } + d.IP = net.IP(addr) + + case ipv6Address: + addr := make([]byte, 16) + if _, err := io.ReadAtLeast(r, addr, len(addr)); err != nil { + return nil, err + } + d.IP = net.IP(addr) + + case fqdnAddress: + if _, err := r.Read(addrType); err != nil { + return nil, err + } + addrLen := int(addrType[0]) + fqdn := make([]byte, addrLen) + if _, err := io.ReadAtLeast(r, fqdn, addrLen); err != nil { + return nil, err + } + d.FQDN = string(fqdn) + + default: + return nil, unrecognizedAddrType + } + + // Read the port + port := []byte{0, 0} + if _, err := io.ReadAtLeast(r, port, 2); err != nil { + return nil, err + } + d.Port = (int(port[0]) << 8) | int(port[1]) + + return d, nil +} + +// sendReply is used to send a reply message +func sendReply(w io.Writer, resp uint8, addr *AddrSpec) error { + // Format the address + var addrType uint8 + var addrBody []byte + var addrPort uint16 + switch { + case addr == nil: + addrType = ipv4Address + addrBody = []byte{0, 0, 0, 0} + addrPort = 0 + + case addr.FQDN != "": + addrType = fqdnAddress + addrBody = append([]byte{byte(len(addr.FQDN))}, addr.FQDN...) 
+ addrPort = uint16(addr.Port) + + case addr.IP.To4() != nil: + addrType = ipv4Address + addrBody = []byte(addr.IP.To4()) + addrPort = uint16(addr.Port) + + case addr.IP.To16() != nil: + addrType = ipv6Address + addrBody = []byte(addr.IP.To16()) + addrPort = uint16(addr.Port) + + default: + return fmt.Errorf("Failed to format address: %v", addr) + } + + // Format the message + msg := make([]byte, 6+len(addrBody)) + msg[0] = socks5Version + msg[1] = resp + msg[2] = 0 // Reserved + msg[3] = addrType + copy(msg[4:], addrBody) + msg[4+len(addrBody)] = byte(addrPort >> 8) + msg[4+len(addrBody)+1] = byte(addrPort & 0xff) + + // Send the message + _, err := w.Write(msg) + return err +} + +type closeWriter interface { + CloseWrite() error +} + +// proxy is used to suffle data from src to destination, and sends errors +// down a dedicated channel +func proxy(dst io.Writer, src io.Reader, errCh chan error) { + _, err := io.Copy(dst, src) + if tcpConn, ok := dst.(closeWriter); ok { + tcpConn.CloseWrite() + } + errCh <- err +} diff --git a/vendor/github.com/armon/go-socks5/resolver.go b/vendor/github.com/armon/go-socks5/resolver.go new file mode 100644 index 0000000000..b75a5c4da0 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/resolver.go @@ -0,0 +1,23 @@ +package socks5 + +import ( + "net" + + "golang.org/x/net/context" +) + +// NameResolver is used to implement custom name resolution +type NameResolver interface { + Resolve(ctx context.Context, name string) (context.Context, net.IP, error) +} + +// DNSResolver uses the system DNS to resolve host names +type DNSResolver struct{} + +func (d DNSResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { + addr, err := net.ResolveIPAddr("ip", name) + if err != nil { + return ctx, nil, err + } + return ctx, addr.IP, err +} diff --git a/vendor/github.com/armon/go-socks5/ruleset.go b/vendor/github.com/armon/go-socks5/ruleset.go new file mode 100644 index 0000000000..ba0e3538e3 --- /dev/null +++ b/vendor/github.com/armon/go-socks5/ruleset.go @@ -0,0 +1,41 @@ +package socks5 + +import ( + "golang.org/x/net/context" +) + +// RuleSet is used to provide custom rules to allow or prohibit actions +type RuleSet interface { + Allow(ctx context.Context, req *Request) (context.Context, bool) +} + +// PermitAll returns a RuleSet which allows all types of connections +func PermitAll() RuleSet { + return &PermitCommand{true, true, true} +} + +// PermitNone returns a RuleSet which disallows all types of connections +func PermitNone() RuleSet { + return &PermitCommand{false, false, false} +} + +// PermitCommand is an implementation of the RuleSet which +// enables filtering supported commands +type PermitCommand struct { + EnableConnect bool + EnableBind bool + EnableAssociate bool +} + +func (p *PermitCommand) Allow(ctx context.Context, req *Request) (context.Context, bool) { + switch req.Command { + case ConnectCommand: + return ctx, p.EnableConnect + case BindCommand: + return ctx, p.EnableBind + case AssociateCommand: + return ctx, p.EnableAssociate + } + + return ctx, false +} diff --git a/vendor/github.com/armon/go-socks5/socks5.go b/vendor/github.com/armon/go-socks5/socks5.go new file mode 100644 index 0000000000..a17be68f4d --- /dev/null +++ b/vendor/github.com/armon/go-socks5/socks5.go @@ -0,0 +1,169 @@ +package socks5 + +import ( + "bufio" + "fmt" + "log" + "net" + "os" + + "golang.org/x/net/context" +) + +const ( + socks5Version = uint8(5) +) + +// Config is used to setup and configure a Server +type Config struct { + // 
AuthMethods can be provided to implement custom authentication + // By default, "auth-less" mode is enabled. + // For password-based auth use UserPassAuthenticator. + AuthMethods []Authenticator + + // If provided, username/password authentication is enabled, + // by appending a UserPassAuthenticator to AuthMethods. If not provided, + // and AUthMethods is nil, then "auth-less" mode is enabled. + Credentials CredentialStore + + // Resolver can be provided to do custom name resolution. + // Defaults to DNSResolver if not provided. + Resolver NameResolver + + // Rules is provided to enable custom logic around permitting + // various commands. If not provided, PermitAll is used. + Rules RuleSet + + // Rewriter can be used to transparently rewrite addresses. + // This is invoked before the RuleSet is invoked. + // Defaults to NoRewrite. + Rewriter AddressRewriter + + // BindIP is used for bind or udp associate + BindIP net.IP + + // Logger can be used to provide a custom log target. + // Defaults to stdout. + Logger *log.Logger + + // Optional function for dialing out + Dial func(ctx context.Context, network, addr string) (net.Conn, error) +} + +// Server is reponsible for accepting connections and handling +// the details of the SOCKS5 protocol +type Server struct { + config *Config + authMethods map[uint8]Authenticator +} + +// New creates a new Server and potentially returns an error +func New(conf *Config) (*Server, error) { + // Ensure we have at least one authentication method enabled + if len(conf.AuthMethods) == 0 { + if conf.Credentials != nil { + conf.AuthMethods = []Authenticator{&UserPassAuthenticator{conf.Credentials}} + } else { + conf.AuthMethods = []Authenticator{&NoAuthAuthenticator{}} + } + } + + // Ensure we have a DNS resolver + if conf.Resolver == nil { + conf.Resolver = DNSResolver{} + } + + // Ensure we have a rule set + if conf.Rules == nil { + conf.Rules = PermitAll() + } + + // Ensure we have a log target + if conf.Logger == nil { + conf.Logger = log.New(os.Stdout, "", log.LstdFlags) + } + + server := &Server{ + config: conf, + } + + server.authMethods = make(map[uint8]Authenticator) + + for _, a := range conf.AuthMethods { + server.authMethods[a.GetCode()] = a + } + + return server, nil +} + +// ListenAndServe is used to create a listener and serve on it +func (s *Server) ListenAndServe(network, addr string) error { + l, err := net.Listen(network, addr) + if err != nil { + return err + } + return s.Serve(l) +} + +// Serve is used to serve connections from a listener +func (s *Server) Serve(l net.Listener) error { + for { + conn, err := l.Accept() + if err != nil { + return err + } + go s.ServeConn(conn) + } + return nil +} + +// ServeConn is used to serve a single connection. 
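+// A minimal end-to-end sketch (illustrative, not part of the upstream file;
+// the listen address and credentials are assumptions): New builds the Server,
+// ListenAndServe accepts connections, and each accepted connection is handled
+// by ServeConn:
+//
+//	conf := &socks5.Config{
+//		Credentials: socks5.StaticCredentials{"user": "secret"},
+//		Rules:       socks5.PermitAll(),
+//	}
+//	server, err := socks5.New(conf)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Fatal(server.ListenAndServe("tcp", "127.0.0.1:1080"))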
+func (s *Server) ServeConn(conn net.Conn) error { + defer conn.Close() + bufConn := bufio.NewReader(conn) + + // Read the version byte + version := []byte{0} + if _, err := bufConn.Read(version); err != nil { + s.config.Logger.Printf("[ERR] socks: Failed to get version byte: %v", err) + return err + } + + // Ensure we are compatible + if version[0] != socks5Version { + err := fmt.Errorf("Unsupported SOCKS version: %v", version) + s.config.Logger.Printf("[ERR] socks: %v", err) + return err + } + + // Authenticate the connection + authContext, err := s.authenticate(conn, bufConn) + if err != nil { + err = fmt.Errorf("Failed to authenticate: %v", err) + s.config.Logger.Printf("[ERR] socks: %v", err) + return err + } + + request, err := NewRequest(bufConn) + if err != nil { + if err == unrecognizedAddrType { + if err := sendReply(conn, addrTypeNotSupported, nil); err != nil { + return fmt.Errorf("Failed to send reply: %v", err) + } + } + return fmt.Errorf("Failed to read destination address: %v", err) + } + request.AuthContext = authContext + if client, ok := conn.RemoteAddr().(*net.TCPAddr); ok { + request.RemoteAddr = &AddrSpec{IP: client.IP, Port: client.Port} + } + + // Process the client request + if err := s.handleRequest(request, conn); err != nil { + err = fmt.Errorf("Failed to handle request: %v", err) + s.config.Logger.Printf("[ERR] socks: %v", err) + return err + } + + return nil +} diff --git a/vendor/github.com/bmizerany/assert/assert.go b/vendor/github.com/bmizerany/assert/assert.go new file mode 100644 index 0000000000..7409f985e8 --- /dev/null +++ b/vendor/github.com/bmizerany/assert/assert.go @@ -0,0 +1,76 @@ +package assert +// Testing helpers for doozer. + +import ( + "github.com/kr/pretty" + "reflect" + "testing" + "runtime" + "fmt" +) + +func assert(t *testing.T, result bool, f func(), cd int) { + if !result { + _, file, line, _ := runtime.Caller(cd + 1) + t.Errorf("%s:%d", file, line) + f() + t.FailNow() + } +} + +func equal(t *testing.T, exp, got interface{}, cd int, args ...interface{}) { + fn := func() { + for _, desc := range pretty.Diff(exp, got) { + t.Error("!", desc) + } + if len(args) > 0 { + t.Error("!", " -", fmt.Sprint(args...)) + } + } + result := reflect.DeepEqual(exp, got) + assert(t, result, fn, cd+1) +} + +func tt(t *testing.T, result bool, cd int, args ...interface{}) { + fn := func() { + t.Errorf("! Failure") + if len(args) > 0 { + t.Error("!", " -", fmt.Sprint(args...)) + } + } + assert(t, result, fn, cd+1) +} + +func T(t *testing.T, result bool, args ...interface{}) { + tt(t, result, 1, args...) +} + +func Tf(t *testing.T, result bool, format string, args ...interface{}) { + tt(t, result, 1, fmt.Sprintf(format, args...)) +} + +func Equal(t *testing.T, exp, got interface{}, args ...interface{}) { + equal(t, exp, got, 1, args...) +} + +func Equalf(t *testing.T, exp, got interface{}, format string, args ...interface{}) { + equal(t, exp, got, 1, fmt.Sprintf(format, args...)) +} + +func NotEqual(t *testing.T, exp, got interface{}, args ...interface{}) { + fn := func() { + t.Errorf("! 
Unexpected: <%#v>", exp) + if len(args) > 0 { + t.Error("!", " -", fmt.Sprint(args...)) + } + } + result := !reflect.DeepEqual(exp, got) + assert(t, result, fn, 1) +} + +func Panic(t *testing.T, err interface{}, fn func()) { + defer func() { + equal(t, err, recover(), 3) + }() + fn() +} diff --git a/vendor/github.com/bmizerany/assert/example/point.go b/vendor/github.com/bmizerany/assert/example/point.go new file mode 100644 index 0000000000..15789fe10f --- /dev/null +++ b/vendor/github.com/bmizerany/assert/example/point.go @@ -0,0 +1,5 @@ +package point + +type Point struct { + X, Y int +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/LICENSE b/vendor/github.com/fluent/fluent-logger-golang/fluent/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go new file mode 100644 index 0000000000..655f6233ca --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/fluent.go @@ -0,0 +1,309 @@ +package fluent + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "math" + "net" + "reflect" + "strconv" + "sync" + "time" +) + +const ( + defaultHost = "127.0.0.1" + defaultNetwork = "tcp" + defaultSocketPath = "" + defaultPort = 24224 + defaultTimeout = 3 * time.Second + defaultBufferLimit = 8 * 1024 * 1024 + defaultRetryWait = 500 + defaultMaxRetry = 13 + defaultReconnectWaitIncreRate = 1.5 +) + +type Config struct { + FluentPort int `json:"fluent_port"` + FluentHost string `json:"fluent_host"` + FluentNetwork string `json:"fluent_network"` + FluentSocketPath string `json:"fluent_socket_path"` + Timeout time.Duration `json:"timeout"` + BufferLimit int `json:"buffer_limit"` + RetryWait int `json:"retry_wait"` + MaxRetry int `json:"max_retry"` + TagPrefix string `json:"tag_prefix"` + AsyncConnect bool `json:"async_connect"` + MarshalAsJSON bool `json:"marshal_as_json"` +} + +type Fluent struct { + Config + + mubuff sync.Mutex + pending []byte + + muconn sync.Mutex + conn io.WriteCloser + reconnecting bool +} + +// New creates a new Logger. +func New(config Config) (f *Fluent, err error) { + if config.FluentNetwork == "" { + config.FluentNetwork = defaultNetwork + } + if config.FluentHost == "" { + config.FluentHost = defaultHost + } + if config.FluentPort == 0 { + config.FluentPort = defaultPort + } + if config.FluentSocketPath == "" { + config.FluentSocketPath = defaultSocketPath + } + if config.Timeout == 0 { + config.Timeout = defaultTimeout + } + if config.BufferLimit == 0 { + config.BufferLimit = defaultBufferLimit + } + if config.RetryWait == 0 { + config.RetryWait = defaultRetryWait + } + if config.MaxRetry == 0 { + config.MaxRetry = defaultMaxRetry + } + if config.AsyncConnect { + f = &Fluent{Config: config, reconnecting: true} + go f.reconnect() + } else { + f = &Fluent{Config: config, reconnecting: false} + err = f.connect() + } + return +} + +// Post writes the output for a logging event. 
+// +// Examples: +// +// // send string +// f.Post("tag_name", "data") +// +// // send map[string] +// mapStringData := map[string]string{ +// "foo": "bar", +// } +// f.Post("tag_name", mapStringData) +// +// // send message with specified time +// mapStringData := map[string]string{ +// "foo": "bar", +// } +// tm := time.Now() +// f.PostWithTime("tag_name", tm, mapStringData) +// +// // send struct +// structData := struct { +// Name string `msg:"name"` +// } { +// "john smith", +// } +// f.Post("tag_name", structData) +// +func (f *Fluent) Post(tag string, message interface{}) error { + timeNow := time.Now() + return f.PostWithTime(tag, timeNow, message) +} + +func (f *Fluent) PostWithTime(tag string, tm time.Time, message interface{}) error { + if len(f.TagPrefix) > 0 { + tag = f.TagPrefix + "." + tag + } + + msg := reflect.ValueOf(message) + msgtype := msg.Type() + + if msgtype.Kind() == reflect.Struct { + // message should be tagged by "codec" or "msg" + kv := make(map[string]interface{}) + fields := msgtype.NumField() + for i := 0; i < fields; i++ { + field := msgtype.Field(i) + name := field.Name + if n1 := field.Tag.Get("msg"); n1 != "" { + name = n1 + } else if n2 := field.Tag.Get("codec"); n2 != "" { + name = n2 + } + kv[name] = msg.FieldByIndex(field.Index).Interface() + } + return f.EncodeAndPostData(tag, tm, kv) + } + + if msgtype.Kind() != reflect.Map { + return errors.New("fluent#PostWithTime: message must be a map") + } else if msgtype.Key().Kind() != reflect.String { + return errors.New("fluent#PostWithTime: map keys must be strings") + } + + kv := make(map[string]interface{}) + for _, k := range msg.MapKeys() { + kv[k.String()] = msg.MapIndex(k).Interface() + } + + return f.EncodeAndPostData(tag, tm, kv) +} + +func (f *Fluent) EncodeAndPostData(tag string, tm time.Time, message interface{}) error { + var data []byte + var err error + if data, err = f.EncodeData(tag, tm, message); err != nil { + return fmt.Errorf("fluent#EncodeAndPostData: can't convert '%#v' to msgpack:%v", message, err) + } + return f.postRawData(data) +} + +// Deprecated: Use EncodeAndPostData instead +func (f *Fluent) PostRawData(data []byte) { + f.postRawData(data) +} + +func (f *Fluent) postRawData(data []byte) error { + if err := f.appendBuffer(data); err != nil { + return err + } + if err := f.send(); err != nil { + f.close() + return err + } + return nil +} + +// For sending forward protocol adopted JSON +type MessageChunk struct { + message Message +} + +// Golang default marshaler does not support +// ["value", "value2", {"key":"value"}] style marshaling. +// So, it should write JSON marshaler by hand. +func (chunk *MessageChunk) MarshalJSON() ([]byte, error) { + data, err := json.Marshal(chunk.message.Record) + return []byte(fmt.Sprintf("[\"%s\",%d,%s,null]", chunk.message.Tag, + chunk.message.Time, data)), err +} + +func (f *Fluent) EncodeData(tag string, tm time.Time, message interface{}) (data []byte, err error) { + timeUnix := tm.Unix() + if f.Config.MarshalAsJSON { + msg := Message{Tag: tag, Time: timeUnix, Record: message} + chunk := &MessageChunk{message: msg} + data, err = json.Marshal(chunk) + } else { + msg := &Message{Tag: tag, Time: timeUnix, Record: message} + data, err = msg.MarshalMsg(nil) + } + return +} + +// Close closes the connection. +func (f *Fluent) Close() (err error) { + if len(f.pending) > 0 { + err = f.send() + } + f.close() + return +} + +// appendBuffer appends data to buffer with lock. 
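+// Illustrative flow (a hedged sketch; assumes a fluentd reachable on the
+// default host/port): a Post travels through EncodeAndPostData and
+// postRawData into appendBuffer and then send:
+//
+//	f, err := fluent.New(fluent.Config{FluentHost: "127.0.0.1", FluentPort: 24224})
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer f.Close()
+//	// Rejected (not blocked) once len(pending)+len(data) would exceed BufferLimit.
+//	_ = f.Post("app.access", map[string]string{"status": "200"})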
+func (f *Fluent) appendBuffer(data []byte) error { + f.mubuff.Lock() + defer f.mubuff.Unlock() + if len(f.pending)+len(data) > f.Config.BufferLimit { + return errors.New(fmt.Sprintf("fluent#appendBuffer: Buffer full, limit %v", f.Config.BufferLimit)) + } + f.pending = append(f.pending, data...) + return nil +} + +// close closes the connection. +func (f *Fluent) close() { + f.muconn.Lock() + if f.conn != nil { + f.conn.Close() + f.conn = nil + } + f.muconn.Unlock() +} + +// connect establishes a new connection using the specified transport. +func (f *Fluent) connect() (err error) { + f.muconn.Lock() + defer f.muconn.Unlock() + + switch f.Config.FluentNetwork { + case "tcp": + f.conn, err = net.DialTimeout(f.Config.FluentNetwork, f.Config.FluentHost+":"+strconv.Itoa(f.Config.FluentPort), f.Config.Timeout) + case "unix": + f.conn, err = net.DialTimeout(f.Config.FluentNetwork, f.Config.FluentSocketPath, f.Config.Timeout) + default: + err = net.UnknownNetworkError(f.Config.FluentNetwork) + } + + if err == nil { + f.reconnecting = false + } + return +} + +func e(x, y float64) int { + return int(math.Pow(x, y)) +} + +func (f *Fluent) reconnect() { + for i := 0; ; i++ { + err := f.connect() + if err == nil { + f.send() + return + } + if i == f.Config.MaxRetry { + // TODO: What we can do when connection failed MaxRetry times? + panic("fluent#reconnect: failed to reconnect!") + } + waitTime := f.Config.RetryWait * e(defaultReconnectWaitIncreRate, float64(i-1)) + time.Sleep(time.Duration(waitTime) * time.Millisecond) + } +} + +func (f *Fluent) send() error { + f.muconn.Lock() + defer f.muconn.Unlock() + + if f.conn == nil { + if f.reconnecting == false { + f.reconnecting = true + go f.reconnect() + } + return errors.New("fluent#send: can't send logs, client is reconnecting") + } + + f.mubuff.Lock() + defer f.mubuff.Unlock() + + var err error + if len(f.pending) > 0 { + _, err = f.conn.Write(f.pending) + if err != nil { + f.conn.Close() + f.conn = nil + } else { + f.pending = f.pending[:0] + } + } + return err +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go new file mode 100644 index 0000000000..268d614dfd --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto.go @@ -0,0 +1,24 @@ +//go:generate msgp + +package fluent + +//msgp:tuple Entry +type Entry struct { + Time int64 `msg:"time"` + Record interface{} `msg:"record"` +} + +//msgp:tuple Forward +type Forward struct { + Tag string `msg:"tag"` + Entries []Entry `msg:"entries"` + Option interface{} `msg:"option"` +} + +//msgp:tuple Message +type Message struct { + Tag string `msg:"tag"` + Time int64 `msg:"time"` + Record interface{} `msg:"record"` + Option interface{} `msg:"option"` +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go new file mode 100644 index 0000000000..afb9d6d31f --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go @@ -0,0 +1,372 @@ +package fluent + +// NOTE: THIS FILE WAS PRODUCED BY THE +// MSGP CODE GENERATION TOOL (github.com/tinylib/msgp) +// DO NOT EDIT + +import ( + "github.com/tinylib/msgp/msgp" +) + +// DecodeMsg implements msgp.Decodable +func (z *Entry) DecodeMsg(dc *msgp.Reader) (err error) { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + z.Time, err = 
dc.ReadInt64() + if err != nil { + return + } + z.Record, err = dc.ReadIntf() + if err != nil { + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z Entry) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(2) + if err != nil { + return + } + err = en.WriteInt64(z.Time) + if err != nil { + return + } + err = en.WriteIntf(z.Record) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z Entry) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, 2) + o = msgp.AppendInt64(o, z.Time) + o, err = msgp.AppendIntf(o, z.Record) + if err != nil { + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Entry) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + } + z.Time, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return + } + z.Record, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + o = bts + return +} + +func (z Entry) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Record) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Forward) DecodeMsg(dc *msgp.Reader) (err error) { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 3 { + err = msgp.ArrayError{Wanted: 3, Got: ssz} + return + } + z.Tag, err = dc.ReadString() + if err != nil { + return + } + var xsz uint32 + xsz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if cap(z.Entries) >= int(xsz) { + z.Entries = z.Entries[:xsz] + } else { + z.Entries = make([]Entry, xsz) + } + for xvk := range z.Entries { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + z.Entries[xvk].Time, err = dc.ReadInt64() + if err != nil { + return + } + z.Entries[xvk].Record, err = dc.ReadIntf() + if err != nil { + return + } + } + z.Option, err = dc.ReadIntf() + if err != nil { + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Forward) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(3) + if err != nil { + return + } + err = en.WriteString(z.Tag) + if err != nil { + return + } + err = en.WriteArrayHeader(uint32(len(z.Entries))) + if err != nil { + return + } + for xvk := range z.Entries { + err = en.WriteArrayHeader(2) + if err != nil { + return + } + err = en.WriteInt64(z.Entries[xvk].Time) + if err != nil { + return + } + err = en.WriteIntf(z.Entries[xvk].Record) + if err != nil { + return + } + } + err = en.WriteIntf(z.Option) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Forward) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, 3) + o = msgp.AppendString(o, z.Tag) + o = msgp.AppendArrayHeader(o, uint32(len(z.Entries))) + for xvk := range z.Entries { + o = msgp.AppendArrayHeader(o, 2) + o = msgp.AppendInt64(o, z.Entries[xvk].Time) + o, err = msgp.AppendIntf(o, z.Entries[xvk].Record) + if err != nil { + return + } + } + o, err = msgp.AppendIntf(o, z.Option) + if err != nil { + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Forward) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var ssz uint32 + ssz, 
bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 3 { + err = msgp.ArrayError{Wanted: 3, Got: ssz} + return + } + } + z.Tag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + return + } + var xsz uint32 + xsz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if cap(z.Entries) >= int(xsz) { + z.Entries = z.Entries[:xsz] + } else { + z.Entries = make([]Entry, xsz) + } + for xvk := range z.Entries { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 2 { + err = msgp.ArrayError{Wanted: 2, Got: ssz} + return + } + } + z.Entries[xvk].Time, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return + } + z.Entries[xvk].Record, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + } + z.Option, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + o = bts + return +} + +func (z *Forward) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.ArrayHeaderSize + for xvk := range z.Entries { + s += msgp.ArrayHeaderSize + msgp.Int64Size + msgp.GuessSize(z.Entries[xvk].Record) + } + s += msgp.GuessSize(z.Option) + return +} + +// DecodeMsg implements msgp.Decodable +func (z *Message) DecodeMsg(dc *msgp.Reader) (err error) { + var ssz uint32 + ssz, err = dc.ReadArrayHeader() + if err != nil { + return + } + if ssz != 4 { + err = msgp.ArrayError{Wanted: 4, Got: ssz} + return + } + z.Tag, err = dc.ReadString() + if err != nil { + return + } + z.Time, err = dc.ReadInt64() + if err != nil { + return + } + z.Record, err = dc.ReadIntf() + if err != nil { + return + } + z.Option, err = dc.ReadIntf() + if err != nil { + return + } + return +} + +// EncodeMsg implements msgp.Encodable +func (z *Message) EncodeMsg(en *msgp.Writer) (err error) { + err = en.WriteArrayHeader(4) + if err != nil { + return + } + err = en.WriteString(z.Tag) + if err != nil { + return + } + err = en.WriteInt64(z.Time) + if err != nil { + return + } + err = en.WriteIntf(z.Record) + if err != nil { + return + } + err = en.WriteIntf(z.Option) + if err != nil { + return + } + return +} + +// MarshalMsg implements msgp.Marshaler +func (z *Message) MarshalMsg(b []byte) (o []byte, err error) { + o = msgp.Require(b, z.Msgsize()) + o = msgp.AppendArrayHeader(o, 4) + o = msgp.AppendString(o, z.Tag) + o = msgp.AppendInt64(o, z.Time) + o, err = msgp.AppendIntf(o, z.Record) + if err != nil { + return + } + o, err = msgp.AppendIntf(o, z.Option) + if err != nil { + return + } + return +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (z *Message) UnmarshalMsg(bts []byte) (o []byte, err error) { + { + var ssz uint32 + ssz, bts, err = msgp.ReadArrayHeaderBytes(bts) + if err != nil { + return + } + if ssz != 4 { + err = msgp.ArrayError{Wanted: 4, Got: ssz} + return + } + } + z.Tag, bts, err = msgp.ReadStringBytes(bts) + if err != nil { + return + } + z.Time, bts, err = msgp.ReadInt64Bytes(bts) + if err != nil { + return + } + z.Record, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + z.Option, bts, err = msgp.ReadIntfBytes(bts) + if err != nil { + return + } + o = bts + return +} + +func (z *Message) Msgsize() (s int) { + s = msgp.ArrayHeaderSize + msgp.StringPrefixSize + len(z.Tag) + msgp.Int64Size + msgp.GuessSize(z.Record) + msgp.GuessSize(z.Option) + return +} diff --git a/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go new file mode 100644 index 
0000000000..8904726ddb --- /dev/null +++ b/vendor/github.com/fluent/fluent-logger-golang/fluent/version.go @@ -0,0 +1,3 @@ +package fluent + +const Version = "1.2.1" diff --git a/vendor/github.com/kr/pretty/License b/vendor/github.com/kr/pretty/License new file mode 100644 index 0000000000..05c783ccf6 --- /dev/null +++ b/vendor/github.com/kr/pretty/License @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/pretty/diff.go b/vendor/github.com/kr/pretty/diff.go new file mode 100644 index 0000000000..6aa7f743a2 --- /dev/null +++ b/vendor/github.com/kr/pretty/diff.go @@ -0,0 +1,265 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" +) + +type sbuf []string + +func (p *sbuf) Printf(format string, a ...interface{}) { + s := fmt.Sprintf(format, a...) + *p = append(*p, s) +} + +// Diff returns a slice where each element describes +// a difference between a and b. +func Diff(a, b interface{}) (desc []string) { + Pdiff((*sbuf)(&desc), a, b) + return desc +} + +// wprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type wprintfer struct{ w io.Writer } + +func (p *wprintfer) Printf(format string, a ...interface{}) { + fmt.Fprintf(p.w, format+"\n", a...) +} + +// Fdiff writes to w a description of the differences between a and b. +func Fdiff(w io.Writer, a, b interface{}) { + Pdiff(&wprintfer{w}, a, b) +} + +type Printfer interface { + Printf(format string, a ...interface{}) +} + +// Pdiff prints to p a description of the differences between a and b. +// It calls Printf once for each difference, with no trailing newline. +// The standard library log.Logger is a Printfer. +func Pdiff(p Printfer, a, b interface{}) { + diffPrinter{w: p}.diff(reflect.ValueOf(a), reflect.ValueOf(b)) +} + +type Logfer interface { + Logf(format string, a ...interface{}) +} + +// logprintfer calls Fprintf on w for each Printf call +// with a trailing newline. +type logprintfer struct{ l Logfer } + +func (p *logprintfer) Printf(format string, a ...interface{}) { + p.l.Logf(format, a...) +} + +// Ldiff prints to l a description of the differences between a and b. +// It calls Logf once for each difference, with no trailing newline. +// The standard library testing.T and testing.B are Logfers. 
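+// An illustrative call (the Addr type and values are hypothetical):
+//
+//	type Addr struct {
+//		Host string
+//		Port int
+//	}
+//	for _, d := range pretty.Diff(Addr{"localhost", 80}, Addr{"localhost", 443}) {
+//		fmt.Println(d) // "Port: 80 != 443"
+//	}
+//	// or, inside a test: pretty.Ldiff(t, want, got)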
+func Ldiff(l Logfer, a, b interface{}) { + Pdiff(&logprintfer{l}, a, b) +} + +type diffPrinter struct { + w Printfer + l string // label +} + +func (w diffPrinter) printf(f string, a ...interface{}) { + var l string + if w.l != "" { + l = w.l + ": " + } + w.w.Printf(l+f, a...) +} + +func (w diffPrinter) diff(av, bv reflect.Value) { + if !av.IsValid() && bv.IsValid() { + w.printf("nil != %# v", formatter{v: bv, quote: true}) + return + } + if av.IsValid() && !bv.IsValid() { + w.printf("%# v != nil", formatter{v: av, quote: true}) + return + } + if !av.IsValid() && !bv.IsValid() { + return + } + + at := av.Type() + bt := bv.Type() + if at != bt { + w.printf("%v != %v", at, bt) + return + } + + switch kind := at.Kind(); kind { + case reflect.Bool: + if a, b := av.Bool(), bv.Bool(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if a, b := av.Int(), bv.Int(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + if a, b := av.Uint(), bv.Uint(); a != b { + w.printf("%d != %d", a, b) + } + case reflect.Float32, reflect.Float64: + if a, b := av.Float(), bv.Float(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Complex64, reflect.Complex128: + if a, b := av.Complex(), bv.Complex(); a != b { + w.printf("%v != %v", a, b) + } + case reflect.Array: + n := av.Len() + for i := 0; i < n; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + if a, b := av.Pointer(), bv.Pointer(); a != b { + w.printf("%#x != %#x", a, b) + } + case reflect.Interface: + w.diff(av.Elem(), bv.Elem()) + case reflect.Map: + ak, both, bk := keyDiff(av.MapKeys(), bv.MapKeys()) + for _, k := range ak { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("%q != (missing)", av.MapIndex(k)) + } + for _, k := range both { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.diff(av.MapIndex(k), bv.MapIndex(k)) + } + for _, k := range bk { + w := w.relabel(fmt.Sprintf("[%#v]", k)) + w.printf("(missing) != %q", bv.MapIndex(k)) + } + case reflect.Ptr: + switch { + case av.IsNil() && !bv.IsNil(): + w.printf("nil != %# v", formatter{v: bv, quote: true}) + case !av.IsNil() && bv.IsNil(): + w.printf("%# v != nil", formatter{v: av, quote: true}) + case !av.IsNil() && !bv.IsNil(): + w.diff(av.Elem(), bv.Elem()) + } + case reflect.Slice: + lenA := av.Len() + lenB := bv.Len() + if lenA != lenB { + w.printf("%s[%d] != %s[%d]", av.Type(), lenA, bv.Type(), lenB) + break + } + for i := 0; i < lenA; i++ { + w.relabel(fmt.Sprintf("[%d]", i)).diff(av.Index(i), bv.Index(i)) + } + case reflect.String: + if a, b := av.String(), bv.String(); a != b { + w.printf("%q != %q", a, b) + } + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + w.relabel(at.Field(i).Name).diff(av.Field(i), bv.Field(i)) + } + default: + panic("unknown reflect Kind: " + kind.String()) + } +} + +func (d diffPrinter) relabel(name string) (d1 diffPrinter) { + d1 = d + if d.l != "" && name[0] != '[' { + d1.l += "." + } + d1.l += name + return d1 +} + +// keyEqual compares a and b for equality. +// Both a and b must be valid map keys. 
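+// keyDiff below matches keys from the two maps with keyEqual rather than
+// reflect.DeepEqual, roughly mirroring Go's == semantics for comparable key
+// kinds (pointers and channels compare by identity, structs field by field).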
+func keyEqual(av, bv reflect.Value) bool { + if !av.IsValid() && !bv.IsValid() { + return true + } + if !av.IsValid() || !bv.IsValid() || av.Type() != bv.Type() { + return false + } + switch kind := av.Kind(); kind { + case reflect.Bool: + a, b := av.Bool(), bv.Bool() + return a == b + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + a, b := av.Int(), bv.Int() + return a == b + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + a, b := av.Uint(), bv.Uint() + return a == b + case reflect.Float32, reflect.Float64: + a, b := av.Float(), bv.Float() + return a == b + case reflect.Complex64, reflect.Complex128: + a, b := av.Complex(), bv.Complex() + return a == b + case reflect.Array: + for i := 0; i < av.Len(); i++ { + if !keyEqual(av.Index(i), bv.Index(i)) { + return false + } + } + return true + case reflect.Chan, reflect.UnsafePointer, reflect.Ptr: + a, b := av.Pointer(), bv.Pointer() + return a == b + case reflect.Interface: + return keyEqual(av.Elem(), bv.Elem()) + case reflect.String: + a, b := av.String(), bv.String() + return a == b + case reflect.Struct: + for i := 0; i < av.NumField(); i++ { + if !keyEqual(av.Field(i), bv.Field(i)) { + return false + } + } + return true + default: + panic("invalid map key type " + av.Type().String()) + } +} + +func keyDiff(a, b []reflect.Value) (ak, both, bk []reflect.Value) { + for _, av := range a { + inBoth := false + for _, bv := range b { + if keyEqual(av, bv) { + inBoth = true + both = append(both, av) + break + } + } + if !inBoth { + ak = append(ak, av) + } + } + for _, bv := range b { + inBoth := false + for _, av := range a { + if keyEqual(av, bv) { + inBoth = true + break + } + } + if !inBoth { + bk = append(bk, bv) + } + } + return +} diff --git a/vendor/github.com/kr/pretty/formatter.go b/vendor/github.com/kr/pretty/formatter.go new file mode 100644 index 0000000000..a317d7b8ee --- /dev/null +++ b/vendor/github.com/kr/pretty/formatter.go @@ -0,0 +1,328 @@ +package pretty + +import ( + "fmt" + "io" + "reflect" + "strconv" + "text/tabwriter" + + "github.com/kr/text" +) + +type formatter struct { + v reflect.Value + force bool + quote bool +} + +// Formatter makes a wrapper, f, that will format x as go source with line +// breaks and tabs. Object f responds to the "%v" formatting verb when both the +// "#" and " " (space) flags are set, for example: +// +// fmt.Sprintf("%# v", Formatter(x)) +// +// If one of these two flags is not set, or any other verb is used, f will +// format x according to the usual rules of package fmt. +// In particular, if x satisfies fmt.Formatter, then x.Format will be called. 
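// Illustrative sketch (not part of the vendored diff): the Formatter wrapper
// documented above responds to the "%# v" verb, printing nested values as
// indented Go syntax rather than fmt's single-line "%#v" form. The Server
// type below is a hypothetical example value.
package main

import (
	"fmt"

	"github.com/kr/pretty"
)

type Server struct {
	Addr  string
	Tags  []string
	Limit *int
}

func main() {
	n := 10
	s := Server{Addr: ":8080", Tags: []string{"a", "b"}, Limit: &n}

	// Multi-line, tab-aligned output via the custom verb.
	fmt.Printf("%# v\n", pretty.Formatter(s))

	// pretty.Println is the convenience wrapper that applies "%# v" for you.
	pretty.Println(s)
}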
+func Formatter(x interface{}) (f fmt.Formatter) { + return formatter{v: reflect.ValueOf(x), quote: true} +} + +func (fo formatter) String() string { + return fmt.Sprint(fo.v.Interface()) // unwrap it +} + +func (fo formatter) passThrough(f fmt.State, c rune) { + s := "%" + for i := 0; i < 128; i++ { + if f.Flag(i) { + s += string(i) + } + } + if w, ok := f.Width(); ok { + s += fmt.Sprintf("%d", w) + } + if p, ok := f.Precision(); ok { + s += fmt.Sprintf(".%d", p) + } + s += string(c) + fmt.Fprintf(f, s, fo.v.Interface()) +} + +func (fo formatter) Format(f fmt.State, c rune) { + if fo.force || c == 'v' && f.Flag('#') && f.Flag(' ') { + w := tabwriter.NewWriter(f, 4, 4, 1, ' ', 0) + p := &printer{tw: w, Writer: w, visited: make(map[visit]int)} + p.printValue(fo.v, true, fo.quote) + w.Flush() + return + } + fo.passThrough(f, c) +} + +type printer struct { + io.Writer + tw *tabwriter.Writer + visited map[visit]int + depth int +} + +func (p *printer) indent() *printer { + q := *p + q.tw = tabwriter.NewWriter(p.Writer, 4, 4, 1, ' ', 0) + q.Writer = text.NewIndentWriter(q.tw, []byte{'\t'}) + return &q +} + +func (p *printer) printInline(v reflect.Value, x interface{}, showType bool) { + if showType { + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, "(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } +} + +// printValue must keep track of already-printed pointer values to avoid +// infinite recursion. +type visit struct { + v uintptr + typ reflect.Type +} + +func (p *printer) printValue(v reflect.Value, showType, quote bool) { + if p.depth > 10 { + io.WriteString(p, "!%v(DEPTH EXCEEDED)") + return + } + + switch v.Kind() { + case reflect.Bool: + p.printInline(v, v.Bool(), showType) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.printInline(v, v.Int(), showType) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.printInline(v, v.Uint(), showType) + case reflect.Float32, reflect.Float64: + p.printInline(v, v.Float(), showType) + case reflect.Complex64, reflect.Complex128: + fmt.Fprintf(p, "%#v", v.Complex()) + case reflect.String: + p.fmtString(v.String(), quote) + case reflect.Map: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + keys := v.MapKeys() + for i := 0; i < v.Len(); i++ { + showTypeInStruct := true + k := keys[i] + mv := v.MapIndex(k) + pp.printValue(k, false, true) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') + } + showTypeInStruct = t.Elem().Kind() == reflect.Interface + pp.printValue(mv, showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Struct: + t := v.Type() + if v.CanAddr() { + addr := v.UnsafeAddr() + vis := visit{addr, t} + if vd, ok := p.visited[vis]; ok && vd < p.depth { + p.fmtString(t.String()+"{(CYCLIC REFERENCE)}", false) + break // don't print v again + } + p.visited[vis] = p.depth + } + + if showType { + io.WriteString(p, t.String()) + } + writeByte(p, '{') + if nonzero(v) { + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.NumField(); i++ { + showTypeInStruct := true + if f := t.Field(i); f.Name != "" { + io.WriteString(pp, f.Name) + writeByte(pp, ':') + if expand { + writeByte(pp, '\t') 
+ } + showTypeInStruct = labelType(f.Type) + } + pp.printValue(getField(v, i), showTypeInStruct, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.NumField()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + } + writeByte(p, '}') + case reflect.Interface: + switch e := v.Elem(); { + case e.Kind() == reflect.Invalid: + io.WriteString(p, "nil") + case e.IsValid(): + pp := *p + pp.depth++ + pp.printValue(e, showType, true) + default: + io.WriteString(p, v.Type().String()) + io.WriteString(p, "(nil)") + } + case reflect.Array, reflect.Slice: + t := v.Type() + if showType { + io.WriteString(p, t.String()) + } + if v.Kind() == reflect.Slice && v.IsNil() && showType { + io.WriteString(p, "(nil)") + break + } + if v.Kind() == reflect.Slice && v.IsNil() { + io.WriteString(p, "nil") + break + } + writeByte(p, '{') + expand := !canInline(v.Type()) + pp := p + if expand { + writeByte(p, '\n') + pp = p.indent() + } + for i := 0; i < v.Len(); i++ { + showTypeInSlice := t.Elem().Kind() == reflect.Interface + pp.printValue(v.Index(i), showTypeInSlice, true) + if expand { + io.WriteString(pp, ",\n") + } else if i < v.Len()-1 { + io.WriteString(pp, ", ") + } + } + if expand { + pp.tw.Flush() + } + writeByte(p, '}') + case reflect.Ptr: + e := v.Elem() + if !e.IsValid() { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + io.WriteString(p, ")(nil)") + } else { + pp := *p + pp.depth++ + writeByte(pp, '&') + pp.printValue(e, true, true) + } + case reflect.Chan: + x := v.Pointer() + if showType { + writeByte(p, '(') + io.WriteString(p, v.Type().String()) + fmt.Fprintf(p, ")(%#v)", x) + } else { + fmt.Fprintf(p, "%#v", x) + } + case reflect.Func: + io.WriteString(p, v.Type().String()) + io.WriteString(p, " {...}") + case reflect.UnsafePointer: + p.printInline(v, v.Pointer(), showType) + case reflect.Invalid: + io.WriteString(p, "nil") + } +} + +func canInline(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map: + return !canExpand(t.Elem()) + case reflect.Struct: + for i := 0; i < t.NumField(); i++ { + if canExpand(t.Field(i).Type) { + return false + } + } + return true + case reflect.Interface: + return false + case reflect.Array, reflect.Slice: + return !canExpand(t.Elem()) + case reflect.Ptr: + return false + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + return false + } + return true +} + +func canExpand(t reflect.Type) bool { + switch t.Kind() { + case reflect.Map, reflect.Struct, + reflect.Interface, reflect.Array, reflect.Slice, + reflect.Ptr: + return true + } + return false +} + +func labelType(t reflect.Type) bool { + switch t.Kind() { + case reflect.Interface, reflect.Struct: + return true + } + return false +} + +func (p *printer) fmtString(s string, quote bool) { + if quote { + s = strconv.Quote(s) + } + io.WriteString(p, s) +} + +func writeByte(w io.Writer, b byte) { + w.Write([]byte{b}) +} + +func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} diff --git a/vendor/github.com/kr/pretty/pretty.go b/vendor/github.com/kr/pretty/pretty.go new file mode 100644 index 0000000000..49423ec7f5 --- /dev/null +++ b/vendor/github.com/kr/pretty/pretty.go @@ -0,0 +1,108 @@ +// Package pretty provides pretty-printing for Go values. This is +// useful during debugging, to avoid wrapping long output lines in +// the terminal. 
+// +// It provides a function, Formatter, that can be used with any +// function that accepts a format string. It also provides +// convenience wrappers for functions in packages fmt and log. +package pretty + +import ( + "fmt" + "io" + "log" + "reflect" +) + +// Errorf is a convenience wrapper for fmt.Errorf. +// +// Calling Errorf(f, x, y) is equivalent to +// fmt.Errorf(f, Formatter(x), Formatter(y)). +func Errorf(format string, a ...interface{}) error { + return fmt.Errorf(format, wrap(a, false)...) +} + +// Fprintf is a convenience wrapper for fmt.Fprintf. +// +// Calling Fprintf(w, f, x, y) is equivalent to +// fmt.Fprintf(w, f, Formatter(x), Formatter(y)). +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, error error) { + return fmt.Fprintf(w, format, wrap(a, false)...) +} + +// Log is a convenience wrapper for log.Printf. +// +// Calling Log(x, y) is equivalent to +// log.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Log(a ...interface{}) { + log.Print(wrap(a, true)...) +} + +// Logf is a convenience wrapper for log.Printf. +// +// Calling Logf(f, x, y) is equivalent to +// log.Printf(f, Formatter(x), Formatter(y)). +func Logf(format string, a ...interface{}) { + log.Printf(format, wrap(a, false)...) +} + +// Logln is a convenience wrapper for log.Printf. +// +// Calling Logln(x, y) is equivalent to +// log.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Logln(a ...interface{}) { + log.Println(wrap(a, true)...) +} + +// Print pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Print(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Print(a ...interface{}) (n int, errno error) { + return fmt.Print(wrap(a, true)...) +} + +// Printf is a convenience wrapper for fmt.Printf. +// +// Calling Printf(f, x, y) is equivalent to +// fmt.Printf(f, Formatter(x), Formatter(y)). +func Printf(format string, a ...interface{}) (n int, errno error) { + return fmt.Printf(format, wrap(a, false)...) +} + +// Println pretty-prints its operands and writes to standard output. +// +// Calling Print(x, y) is equivalent to +// fmt.Println(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Println(a ...interface{}) (n int, errno error) { + return fmt.Println(wrap(a, true)...) +} + +// Sprint is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprint(x, y) is equivalent to +// fmt.Sprint(Formatter(x), Formatter(y)), but each operand is +// formatted with "%# v". +func Sprint(a ...interface{}) string { + return fmt.Sprint(wrap(a, true)...) +} + +// Sprintf is a convenience wrapper for fmt.Sprintf. +// +// Calling Sprintf(f, x, y) is equivalent to +// fmt.Sprintf(f, Formatter(x), Formatter(y)). +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, wrap(a, false)...) 
+} + +func wrap(a []interface{}, force bool) []interface{} { + w := make([]interface{}, len(a)) + for i, x := range a { + w[i] = formatter{v: reflect.ValueOf(x), force: force} + } + return w +} diff --git a/vendor/github.com/kr/pretty/zero.go b/vendor/github.com/kr/pretty/zero.go new file mode 100644 index 0000000000..abb5b6fc14 --- /dev/null +++ b/vendor/github.com/kr/pretty/zero.go @@ -0,0 +1,41 @@ +package pretty + +import ( + "reflect" +) + +func nonzero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() != 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() != 0 + case reflect.Float32, reflect.Float64: + return v.Float() != 0 + case reflect.Complex64, reflect.Complex128: + return v.Complex() != complex(0, 0) + case reflect.String: + return v.String() != "" + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + if nonzero(getField(v, i)) { + return true + } + } + return false + case reflect.Array: + for i := 0; i < v.Len(); i++ { + if nonzero(v.Index(i)) { + return true + } + } + return false + case reflect.Map, reflect.Interface, reflect.Slice, reflect.Ptr, reflect.Chan, reflect.Func: + return !v.IsNil() + case reflect.UnsafePointer: + return v.Pointer() != 0 + } + return true +} diff --git a/vendor/github.com/kr/text/License b/vendor/github.com/kr/text/License new file mode 100644 index 0000000000..480a328059 --- /dev/null +++ b/vendor/github.com/kr/text/License @@ -0,0 +1,19 @@ +Copyright 2012 Keith Rarick + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/kr/text/cmd/agg/doc.go b/vendor/github.com/kr/text/cmd/agg/doc.go new file mode 100644 index 0000000000..3a9ec2db4f --- /dev/null +++ b/vendor/github.com/kr/text/cmd/agg/doc.go @@ -0,0 +1,73 @@ +/* + +Agg computes aggregate values over tabular text. +It behaves somewhat like the SQL “GROUP BY” clause. + +Usage: + + agg [function...] + +It reads input from stdin as a sequence of records, one per line. +It treats each line as a set of fields separated by white space. +One field (the first, by default) is designated as the key. +Successive lines with equal keys are grouped into a group, +and agg produces one line of output for each group. +(Note that only contiguous input lines can form a group. +If you need to make sure that all records for a given key +are grouped together, sort the input first.) 
+ +For each remaining field, +agg applies a function to all the values in the group, +producing a single output value. +The command line arguments specify which functions to use, +one per field in the input table. + +Functions + +The available functions are: + + key group by this field (default for field 1) + first value from first line of group (default for rest) + last value from last line of group + sample value from any line of group, uniformly at random + prefix longest common string prefix + join:sep concatenate strings with given sep + smin lexically least string + smax lexically greatest string + min numerically least value + max numerically greatest value + sum numeric sum + mean arithmetic mean + count number of records (ignores input value) + const:val print val, ignoring input + drop omit the column entirely + +The numeric functions skip items that don't parse as numbers. + +Examples + +Using the following input: + + $ cat >input + -rwx alice 100 /home/alice/bin/crdt + -rw- alice 210002 /home/alice/thesis.tex + -rw- bob 10051 /home/bob/expenses.tab + -rwx kr 862060 /home/kr/bin/blog + -rwx kr 304608 /home/kr/bin/agg + +Disk usage for each user, plus where that disk usage occurs +(longest common prefix of filesystem paths): + + $ agg = 0 { + sym, argmap[i] = sym[:p], sym[p+1:] + } + if sym == "key" { + key, sym = i, "first" + } + f, ok := symtab[sym] + if !ok { + log.Fatalf("bad function: %q", sym) + } + funcmap[i] = f + } + + sc := bufio.NewScanner(os.Stdin) + var g *group + for sc.Scan() { + ss := strings.Fields(sc.Text()) + if !matches(g, ss) { + emit(g) + g = &group{key: ss[key]} + } + mergeLine(g, ss) + } + emit(g) +} + +type group struct { + key string + agg []agg +} + +func matches(g *group, ss []string) bool { + return g != nil && g.key == ss[key] +} + +func emit(g *group) { + if g == nil { + return + } + rest := false + for i, a := range g.agg { + if f, ok := funcmap[i]; ok && f == nil { + continue + } + if rest { + fmt.Print("\t") + } + rest = true + fmt.Print(a) + } + fmt.Println() +} + +func mergeLine(g *group, ss []string) { + for i, s := range ss { + if i >= len(g.agg) { + f := funcmap[i] + if f == nil { + f = first + } + g.agg = append(g.agg, f(s, argmap[i])) + } else { + g.agg[i].merge(s) + } + } +} diff --git a/vendor/github.com/kr/text/cmd/agg/num.go b/vendor/github.com/kr/text/cmd/agg/num.go new file mode 100644 index 0000000000..93ac3fe15e --- /dev/null +++ b/vendor/github.com/kr/text/cmd/agg/num.go @@ -0,0 +1,99 @@ +package main + +import ( + "math/big" + "strconv" +) + +func min(s, arg string) agg { return newBinop(s, opmin) } +func max(s, arg string) agg { return newBinop(s, opmax) } +func sum(s, arg string) agg { return newBinop(s, opsum) } + +type binop struct { + v *big.Float + f func(a, b *big.Float) *big.Float +} + +func newBinop(s string, f func(a, b *big.Float) *big.Float) *binop { + v, _ := parseFloat(s) + return &binop{v, f} +} + +func (o *binop) String() string { + if o.v == nil { + return "NaN" + } + return o.v.Text('f', -1) +} + +func (o *binop) merge(s string) { + v, ok := parseFloat(s) + if !ok { + return + } + o.v = o.f(o.v, v) +} + +func opmin(a, b *big.Float) *big.Float { + if a != nil && (b == nil || a.Cmp(b) <= 0) { + return a + } + return b +} + +func opmax(a, b *big.Float) *big.Float { + if a != nil && (b == nil || a.Cmp(b) >= 0) { + return a + } + return b +} + +func opsum(a, b *big.Float) *big.Float { + if a == nil { + return b + } else if b == nil { + return a + } + return a.Add(a, b) +} + +type meanagg struct { + v *big.Float + d 
float64 // actually an integer +} + +func mean(s, arg string) agg { + v, ok := parseFloat(s) + if !ok { + return &meanagg{new(big.Float), 0} + } + return &meanagg{v, 1} +} + +func (m *meanagg) String() string { + if m.d == 0 { + return "NaN" + } + v := new(big.Float).Quo(m.v, big.NewFloat(m.d)) + return v.Text('f', -1) +} + +func (m *meanagg) merge(s string) { + v, ok := parseFloat(s) + if !ok { + return + } + m.v.Add(m.v, v) + m.d++ +} + +func parseFloat(s string) (*big.Float, bool) { + v, _, err := big.ParseFloat(s, 0, 1000, big.ToNearestEven) + return v, err == nil +} + +type counter int + +func count(init, arg string) agg { return new(counter) } +func (c *counter) String() string { return strconv.Itoa(int(*c) + 1) } +func (c *counter) merge(string) { *c++ } diff --git a/vendor/github.com/kr/text/cmd/agg/string.go b/vendor/github.com/kr/text/cmd/agg/string.go new file mode 100644 index 0000000000..9a8cf78cae --- /dev/null +++ b/vendor/github.com/kr/text/cmd/agg/string.go @@ -0,0 +1,74 @@ +package main + +import ( + "math/rand" + "strings" +) + +func first(s, arg string) agg { return &sbinop{s, opfirst} } +func last(s, arg string) agg { return &sbinop{s, oplast} } +func prefix(s, arg string) agg { return &sbinop{s, opprefix} } +func join(s, arg string) agg { return &sbinop{s, opjoin(arg)} } +func smin(s, arg string) agg { return &sbinop{s, opsmin} } +func smax(s, arg string) agg { return &sbinop{s, opsmax} } + +type sbinop struct { + s string + f func(a, b string) string +} + +func (o *sbinop) String() string { return o.s } + +func (o *sbinop) merge(s string) { o.s = o.f(o.s, s) } + +func opfirst(a, b string) string { return a } +func oplast(a, b string) string { return b } + +func opprefix(a, b string) string { + for i := range a { + if i >= len(b) || a[i] != b[i] { + return a[:i] + } + } + return a +} + +func opjoin(sep string) func(a, b string) string { + return func(a, b string) string { + return a + sep + b // TODO(kr): too slow? maybe strings.Join? + } +} + +func opsmin(a, b string) string { + if strings.Compare(a, b) <= 0 { + return a + } + return b +} + +func opsmax(a, b string) string { + if strings.Compare(a, b) >= 0 { + return a + } + return b +} + +type sampler struct { + n int + s string +} + +func sample(s, arg string) agg { return &sampler{1, s} } +func (p *sampler) String() string { return p.s } +func (p *sampler) merge(s string) { + p.n++ + if rand.Intn(p.n) == 0 { + p.s = s + } +} + +type constant string + +func constf(init, arg string) agg { return constant(arg) } +func (c constant) String() string { return string(c) } +func (c constant) merge(string) {} diff --git a/vendor/github.com/kr/text/colwriter/column.go b/vendor/github.com/kr/text/colwriter/column.go new file mode 100644 index 0000000000..7302ce9f7a --- /dev/null +++ b/vendor/github.com/kr/text/colwriter/column.go @@ -0,0 +1,147 @@ +// Package colwriter provides a write filter that formats +// input lines in multiple columns. +// +// The package is a straightforward translation from +// /src/cmd/draw/mc.c in Plan 9 from User Space. +package colwriter + +import ( + "bytes" + "io" + "unicode/utf8" +) + +const ( + tab = 4 +) + +const ( + // Print each input line ending in a colon ':' separately. + BreakOnColon uint = 1 << iota +) + +// A Writer is a filter that arranges input lines in as many columns as will +// fit in its width. Tab '\t' chars in the input are translated to sequences +// of spaces ending at multiples of 4 positions. 
+// +// If BreakOnColon is set, each input line ending in a colon ':' is written +// separately. +// +// The Writer assumes that all Unicode code points have the same width; this +// may not be true in some fonts. +type Writer struct { + w io.Writer + buf []byte + width int + flag uint +} + +// NewWriter allocates and initializes a new Writer writing to w. +// Parameter width controls the total number of characters on each line +// across all columns. +func NewWriter(w io.Writer, width int, flag uint) *Writer { + return &Writer{ + w: w, + width: width, + flag: flag, + } +} + +// Write writes p to the writer w. The only errors returned are ones +// encountered while writing to the underlying output stream. +func (w *Writer) Write(p []byte) (n int, err error) { + var linelen int + var lastWasColon bool + for i, c := range p { + w.buf = append(w.buf, c) + linelen++ + if c == '\t' { + w.buf[len(w.buf)-1] = ' ' + for linelen%tab != 0 { + w.buf = append(w.buf, ' ') + linelen++ + } + } + if w.flag&BreakOnColon != 0 && c == ':' { + lastWasColon = true + } else if lastWasColon { + if c == '\n' { + pos := bytes.LastIndex(w.buf[:len(w.buf)-1], []byte{'\n'}) + if pos < 0 { + pos = 0 + } + line := w.buf[pos:] + w.buf = w.buf[:pos] + if err = w.columnate(); err != nil { + if len(line) < i { + return i - len(line), err + } + return 0, err + } + if n, err := w.w.Write(line); err != nil { + if r := len(line) - n; r < i { + return i - r, err + } + return 0, err + } + } + lastWasColon = false + } + if c == '\n' { + linelen = 0 + } + } + return len(p), nil +} + +// Flush should be called after the last call to Write to ensure that any data +// buffered in the Writer is written to output. +func (w *Writer) Flush() error { + return w.columnate() +} + +func (w *Writer) columnate() error { + words := bytes.Split(w.buf, []byte{'\n'}) + w.buf = nil + if len(words[len(words)-1]) == 0 { + words = words[:len(words)-1] + } + maxwidth := 0 + for _, wd := range words { + if n := utf8.RuneCount(wd); n > maxwidth { + maxwidth = n + } + } + maxwidth++ // space char + wordsPerLine := w.width / maxwidth + if wordsPerLine <= 0 { + wordsPerLine = 1 + } + nlines := (len(words) + wordsPerLine - 1) / wordsPerLine + for i := 0; i < nlines; i++ { + col := 0 + endcol := 0 + for j := i; j < len(words); j += nlines { + endcol += maxwidth + _, err := w.w.Write(words[j]) + if err != nil { + return err + } + col += utf8.RuneCount(words[j]) + if j+nlines < len(words) { + for col < endcol { + _, err := w.w.Write([]byte{' '}) + if err != nil { + return err + } + col++ + } + } + } + _, err := w.w.Write([]byte{'\n'}) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/kr/text/doc.go b/vendor/github.com/kr/text/doc.go new file mode 100644 index 0000000000..cf4c198f95 --- /dev/null +++ b/vendor/github.com/kr/text/doc.go @@ -0,0 +1,3 @@ +// Package text provides rudimentary functions for manipulating text in +// paragraphs. +package text diff --git a/vendor/github.com/kr/text/indent.go b/vendor/github.com/kr/text/indent.go new file mode 100644 index 0000000000..4ebac45c09 --- /dev/null +++ b/vendor/github.com/kr/text/indent.go @@ -0,0 +1,74 @@ +package text + +import ( + "io" +) + +// Indent inserts prefix at the beginning of each non-empty line of s. The +// end-of-line marker is NL. +func Indent(s, prefix string) string { + return string(IndentBytes([]byte(s), []byte(prefix))) +} + +// IndentBytes inserts prefix at the beginning of each non-empty line of b. +// The end-of-line marker is NL. 
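// Illustrative sketch (not part of the vendored diff): using the indenting
// helpers from github.com/kr/text. Indent prefixes each non-empty line of a
// string; NewIndentWriter (defined just below in indent.go) does the same as
// a streaming io.Writer filter. The sample strings are arbitrary.
package main

import (
	"fmt"
	"os"

	"github.com/kr/text"
)

func main() {
	fmt.Print(text.Indent("first\nsecond\n", "    "))

	// Streaming variant: everything written to w comes out prefixed by a tab.
	w := text.NewIndentWriter(os.Stdout, []byte("\t"))
	fmt.Fprintln(w, "streamed line")
}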
+func IndentBytes(b, prefix []byte) []byte { + var res []byte + bol := true + for _, c := range b { + if bol && c != '\n' { + res = append(res, prefix...) + } + res = append(res, c) + bol = c == '\n' + } + return res +} + +// Writer indents each line of its input. +type indentWriter struct { + w io.Writer + bol bool + pre [][]byte + sel int + off int +} + +// NewIndentWriter makes a new write filter that indents the input +// lines. Each line is prefixed in order with the corresponding +// element of pre. If there are more lines than elements, the last +// element of pre is repeated for each subsequent line. +func NewIndentWriter(w io.Writer, pre ...[]byte) io.Writer { + return &indentWriter{ + w: w, + pre: pre, + bol: true, + } +} + +// The only errors returned are from the underlying indentWriter. +func (w *indentWriter) Write(p []byte) (n int, err error) { + for _, c := range p { + if w.bol { + var i int + i, err = w.w.Write(w.pre[w.sel][w.off:]) + w.off += i + if err != nil { + return n, err + } + } + _, err = w.w.Write([]byte{c}) + if err != nil { + return n, err + } + n++ + w.bol = c == '\n' + if w.bol { + w.off = 0 + if w.sel < len(w.pre)-1 { + w.sel++ + } + } + } + return n, nil +} diff --git a/vendor/github.com/kr/text/mc/mc.go b/vendor/github.com/kr/text/mc/mc.go new file mode 100644 index 0000000000..00169a30f1 --- /dev/null +++ b/vendor/github.com/kr/text/mc/mc.go @@ -0,0 +1,62 @@ +// Command mc prints in multiple columns. +// +// Usage: mc [-] [-N] [file...] +// +// Mc splits the input into as many columns as will fit in N +// print positions. If the output is a tty, the default N is +// the number of characters in a terminal line; otherwise the +// default N is 80. Under option - each input line ending in +// a colon ':' is printed separately. +package main + +import ( + "github.com/kr/pty" + "github.com/kr/text/colwriter" + "io" + "log" + "os" + "strconv" +) + +func main() { + var width int + var flag uint + args := os.Args[1:] + for len(args) > 0 && len(args[0]) > 0 && args[0][0] == '-' { + if len(args[0]) > 1 { + width, _ = strconv.Atoi(args[0][1:]) + } else { + flag |= colwriter.BreakOnColon + } + args = args[1:] + } + if width < 1 { + _, width, _ = pty.Getsize(os.Stdout) + } + if width < 1 { + width = 80 + } + + w := colwriter.NewWriter(os.Stdout, width, flag) + if len(args) > 0 { + for _, s := range args { + if f, err := os.Open(s); err == nil { + copyin(w, f) + f.Close() + } else { + log.Println(err) + } + } + } else { + copyin(w, os.Stdin) + } +} + +func copyin(w *colwriter.Writer, r io.Reader) { + if _, err := io.Copy(w, r); err != nil { + log.Println(err) + } + if err := w.Flush(); err != nil { + log.Println(err) + } +} diff --git a/vendor/github.com/kr/text/wrap.go b/vendor/github.com/kr/text/wrap.go new file mode 100644 index 0000000000..b09bb03736 --- /dev/null +++ b/vendor/github.com/kr/text/wrap.go @@ -0,0 +1,86 @@ +package text + +import ( + "bytes" + "math" +) + +var ( + nl = []byte{'\n'} + sp = []byte{' '} +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func Wrap(s string, lim int) string { + return string(WrapBytes([]byte(s), lim)) +} + +// WrapBytes wraps b into a paragraph of lines of length lim, with minimal +// raggedness. 
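// Illustrative sketch (not part of the vendored diff): Wrap reflows a string
// into lines of roughly lim bytes with minimal raggedness, using the
// dynamic-programming line-breaking algorithm that WrapWords implements
// below. The input sentence is arbitrary.
package main

import (
	"fmt"

	"github.com/kr/text"
)

func main() {
	s := "The quick brown fox jumps over the lazy dog and keeps on running."
	fmt.Println(text.Wrap(s, 24))
}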
+func WrapBytes(b []byte, lim int) []byte { + words := bytes.Split(bytes.Replace(bytes.TrimSpace(b), nl, sp, -1), sp) + var lines [][]byte + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, bytes.Join(line, sp)) + } + return bytes.Join(lines, nl) +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, either +// Wrap or WrapBytes will be sufficient and more convenient. +// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each byte as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. +func WrapWords(words [][]byte, spc, lim, pen int) [][][]byte { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = len(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + len(words[j]) + } + } + + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim || i == n-1 { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + + var lines [][][]byte + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} diff --git a/vendor/github.com/mattn/go-colorable/LICENSE b/vendor/github.com/mattn/go-colorable/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
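// Illustrative sketch (not part of the vendored diff): typical use of the
// go-colorable package whose files follow. NewColorableStdout returns a
// writer that passes ANSI escape sequences through on Unix and translates
// them into console attribute calls on Windows, so the same colored output
// works on both. The escape codes below are ordinary ANSI SGR sequences.
package main

import (
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	out := colorable.NewColorableStdout()
	fmt.Fprintf(out, "\x1b[32mgreen\x1b[0m and \x1b[31mred\x1b[0m\n")
}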
diff --git a/vendor/github.com/mattn/go-colorable/colorable_others.go b/vendor/github.com/mattn/go-colorable/colorable_others.go new file mode 100644 index 0000000000..a7fe19a8ca --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_others.go @@ -0,0 +1,27 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +// NewColorable return new instance of Writer which handle escape sequence. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. +func NewColorableStdout() io.Writer { + return os.Stdout +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/vendor/github.com/mattn/go-colorable/colorable_windows.go b/vendor/github.com/mattn/go-colorable/colorable_windows.go new file mode 100644 index 0000000000..628ad904e5 --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/colorable_windows.go @@ -0,0 +1,820 @@ +package colorable + +import ( + "bytes" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/mattn/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +type consoleCursorInfo struct { + size dword + visible int32 +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") + procGetConsoleCursorInfo = kernel32.NewProc("GetConsoleCursorInfo") + procSetConsoleCursorInfo = kernel32.NewProc("SetConsoleCursorInfo") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word + oldpos coord +} + +// NewColorable return new instance of Writer which handle escape sequence from File. +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes, oldpos: coord{0, 0}} + } else { + return file + } +} + +// NewColorableStdout return new instance of Writer which handle escape sequence for stdout. 
+func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +// NewColorableStderr return new instance of Writer which handle escape sequence for stderr. +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 
199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +// Write write data on console +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + w.lastbuf.WriteByte(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteByte(c1) + w.lastbuf.WriteByte(c2) + continue + } + + var buf bytes.Buffer + var m byte + for { + c, err := er.ReadByte() + if err != nil { + w.lastbuf.WriteByte(c1) + w.lastbuf.WriteByte(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), 
uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n - 1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2 - 1) + csbi.cursorPosition.y = short(n1 - 1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i++ { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n 
== 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr &= backgroundMask + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr &= foregroundMask + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. + attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + case 'h': + cs := buf.String() + if cs == "?25" { + var ci consoleCursorInfo + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 1 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 'l': + cs := buf.String() + if cs == "?25" { + var ci consoleCursorInfo + procGetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + ci.visible = 0 + procSetConsoleCursorInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&ci))) + } + case 's': + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + w.oldpos = csbi.cursorPosition + case 'u': + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&w.oldpos))) + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, 
false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/vendor/github.com/mattn/go-colorable/noncolorable.go b/vendor/github.com/mattn/go-colorable/noncolorable.go new file mode 100644 index 0000000000..ca588c78ac --- /dev/null +++ b/vendor/github.com/mattn/go-colorable/noncolorable.go @@ -0,0 +1,61 @@ +package colorable + +import ( + "bytes" + "io" +) + +// NonColorable hold writer but remove escape sequence. +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +// NewNonColorable return new instance of Writer which remove escape sequence from Writer. 
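// Illustrative sketch (not part of the vendored diff): NewNonColorable,
// defined just below, wraps a writer and strips ANSI escape sequences, which
// is useful when colored terminal output is also copied to a log file. The
// bytes.Buffer destination here is a hypothetical stand-in for such a sink.
package main

import (
	"bytes"
	"fmt"

	"github.com/mattn/go-colorable"
)

func main() {
	var buf bytes.Buffer
	plain := colorable.NewNonColorable(&buf)
	fmt.Fprintf(plain, "\x1b[31merror:\x1b[0m disk full\n")
	fmt.Print(buf.String()) // prints "error: disk full" with the color codes removed
}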
+func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +// Write write data on console +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewReader(data) + var bw [1]byte +loop: + for { + c1, err := er.ReadByte() + if err != nil { + break loop + } + if c1 != 0x1b { + bw[0] = c1 + w.out.Write(bw[:]) + continue + } + c2, err := er.ReadByte() + if err != nil { + w.lastbuf.WriteByte(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteByte(c1) + w.lastbuf.WriteByte(c2) + continue + } + + var buf bytes.Buffer + for { + c, err := er.ReadByte() + if err != nil { + w.lastbuf.WriteByte(c1) + w.lastbuf.WriteByte(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/vendor/github.com/mattn/go-isatty/LICENSE b/vendor/github.com/mattn/go-isatty/LICENSE new file mode 100644 index 0000000000..65dc692b6b --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/mattn/go-isatty/doc.go b/vendor/github.com/mattn/go-isatty/doc.go new file mode 100644 index 0000000000..17d4f90ebc --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go new file mode 100644 index 0000000000..83c588773c --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_appengine.go @@ -0,0 +1,9 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on on appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_bsd.go b/vendor/github.com/mattn/go-isatty/isatty_bsd.go new file mode 100644 index 0000000000..42f2514d13 --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. 
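// Illustrative sketch (not part of the vendored diff): the usual caller-side
// check with go-isatty, deciding whether to emit color depending on whether
// stdout is attached to a terminal or has been piped elsewhere.
package main

import (
	"fmt"
	"os"

	"github.com/mattn/go-isatty"
)

func main() {
	if isatty.IsTerminal(os.Stdout.Fd()) {
		fmt.Println("\x1b[36mstdout is a terminal\x1b[0m")
	} else {
		fmt.Println("stdout is not a terminal (piped or redirected)")
	}
}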
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_linux.go b/vendor/github.com/mattn/go-isatty/isatty_linux.go new file mode 100644 index 0000000000..9d24bac1db --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_solaris.go b/vendor/github.com/mattn/go-isatty/isatty_solaris.go new file mode 100644 index 0000000000..1f0c6bf53d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/vendor/github.com/mattn/go-isatty/isatty_windows.go b/vendor/github.com/mattn/go-isatty/isatty_windows.go new file mode 100644 index 0000000000..83c398b16d --- /dev/null +++ b/vendor/github.com/mattn/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE new file mode 100644 index 0000000000..91b5cef30e --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go new file mode 100644 index 0000000000..3e730656b5 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth.go @@ -0,0 +1,481 @@ +package runewidth + +var ( + // EastAsianWidth will be set true if the current locale is CJK + EastAsianWidth = IsEastAsian() + + // DefaultCondition is a condition in current locale + DefaultCondition = &Condition{EastAsianWidth} +) + +type interval struct { + first rune + last rune +} + +var combining = []interval{ + {0x0300, 0x036F}, {0x0483, 0x0486}, {0x0488, 0x0489}, + {0x0591, 0x05BD}, {0x05BF, 0x05BF}, {0x05C1, 0x05C2}, + {0x05C4, 0x05C5}, {0x05C7, 0x05C7}, {0x0600, 0x0603}, + {0x0610, 0x0615}, {0x064B, 0x065E}, {0x0670, 0x0670}, + {0x06D6, 0x06E4}, {0x06E7, 0x06E8}, {0x06EA, 0x06ED}, + {0x070F, 0x070F}, {0x0711, 0x0711}, {0x0730, 0x074A}, + {0x07A6, 0x07B0}, {0x07EB, 0x07F3}, {0x0901, 0x0902}, + {0x093C, 0x093C}, {0x0941, 0x0948}, {0x094D, 0x094D}, + {0x0951, 0x0954}, {0x0962, 0x0963}, {0x0981, 0x0981}, + {0x09BC, 0x09BC}, {0x09C1, 0x09C4}, {0x09CD, 0x09CD}, + {0x09E2, 0x09E3}, {0x0A01, 0x0A02}, {0x0A3C, 0x0A3C}, + {0x0A41, 0x0A42}, {0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, + {0x0A70, 0x0A71}, {0x0A81, 0x0A82}, {0x0ABC, 0x0ABC}, + {0x0AC1, 0x0AC5}, {0x0AC7, 0x0AC8}, {0x0ACD, 0x0ACD}, + {0x0AE2, 0x0AE3}, {0x0B01, 0x0B01}, {0x0B3C, 0x0B3C}, + {0x0B3F, 0x0B3F}, {0x0B41, 0x0B43}, {0x0B4D, 0x0B4D}, + {0x0B56, 0x0B56}, {0x0B82, 0x0B82}, {0x0BC0, 0x0BC0}, + {0x0BCD, 0x0BCD}, {0x0C3E, 0x0C40}, {0x0C46, 0x0C48}, + {0x0C4A, 0x0C4D}, {0x0C55, 0x0C56}, {0x0CBC, 0x0CBC}, + {0x0CBF, 0x0CBF}, {0x0CC6, 0x0CC6}, {0x0CCC, 0x0CCD}, + {0x0CE2, 0x0CE3}, {0x0D41, 0x0D43}, {0x0D4D, 0x0D4D}, + {0x0DCA, 0x0DCA}, {0x0DD2, 0x0DD4}, {0x0DD6, 0x0DD6}, + {0x0E31, 0x0E31}, {0x0E34, 0x0E3A}, {0x0E47, 0x0E4E}, + {0x0EB1, 0x0EB1}, {0x0EB4, 0x0EB9}, {0x0EBB, 0x0EBC}, + {0x0EC8, 0x0ECD}, {0x0F18, 0x0F19}, {0x0F35, 0x0F35}, + {0x0F37, 0x0F37}, {0x0F39, 0x0F39}, {0x0F71, 0x0F7E}, + {0x0F80, 0x0F84}, {0x0F86, 0x0F87}, {0x0F90, 0x0F97}, + {0x0F99, 0x0FBC}, {0x0FC6, 0x0FC6}, {0x102D, 0x1030}, + {0x1032, 0x1032}, {0x1036, 0x1037}, {0x1039, 0x1039}, + {0x1058, 0x1059}, {0x1160, 0x11FF}, {0x135F, 0x135F}, + {0x1712, 0x1714}, {0x1732, 0x1734}, {0x1752, 0x1753}, + {0x1772, 0x1773}, {0x17B4, 0x17B5}, {0x17B7, 0x17BD}, + {0x17C6, 0x17C6}, {0x17C9, 0x17D3}, {0x17DD, 0x17DD}, + {0x180B, 0x180D}, {0x18A9, 0x18A9}, {0x1920, 0x1922}, + {0x1927, 0x1928}, {0x1932, 0x1932}, {0x1939, 0x193B}, + {0x1A17, 0x1A18}, {0x1B00, 0x1B03}, {0x1B34, 0x1B34}, + {0x1B36, 0x1B3A}, {0x1B3C, 0x1B3C}, {0x1B42, 0x1B42}, + {0x1B6B, 0x1B73}, {0x1DC0, 0x1DCA}, {0x1DFE, 0x1DFF}, + {0x200B, 0x200F}, {0x202A, 0x202E}, {0x2060, 0x2063}, + {0x206A, 0x206F}, {0x20D0, 0x20EF}, {0x302A, 0x302F}, + {0x3099, 0x309A}, {0xA806, 0xA806}, {0xA80B, 0xA80B}, + {0xA825, 0xA826}, {0xFB1E, 0xFB1E}, {0xFE00, 0xFE0F}, + {0xFE20, 0xFE23}, {0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, + {0x10A01, 0x10A03}, {0x10A05, 0x10A06}, {0x10A0C, 0x10A0F}, + {0x10A38, 0x10A3A}, {0x10A3F, 0x10A3F}, {0x1D167, 0x1D169}, + {0x1D173, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD}, + {0x1D242, 0x1D244}, {0xE0001, 0xE0001}, {0xE0020, 0xE007F}, + {0xE0100, 0xE01EF}, +} + +type ctype int + +const ( + narrow 
ctype = iota + ambiguous + wide + halfwidth + fullwidth + neutral +) + +type intervalType struct { + first rune + last rune + ctype ctype +} + +var ctypes = []intervalType{ + {0x0020, 0x007E, narrow}, + {0x00A1, 0x00A1, ambiguous}, + {0x00A2, 0x00A3, narrow}, + {0x00A4, 0x00A4, ambiguous}, + {0x00A5, 0x00A6, narrow}, + {0x00A7, 0x00A8, ambiguous}, + {0x00AA, 0x00AA, ambiguous}, + {0x00AC, 0x00AC, narrow}, + {0x00AD, 0x00AE, ambiguous}, + {0x00AF, 0x00AF, narrow}, + {0x00B0, 0x00B4, ambiguous}, + {0x00B6, 0x00BA, ambiguous}, + {0x00BC, 0x00BF, ambiguous}, + {0x00C6, 0x00C6, ambiguous}, + {0x00D0, 0x00D0, ambiguous}, + {0x00D7, 0x00D8, ambiguous}, + {0x00DE, 0x00E1, ambiguous}, + {0x00E6, 0x00E6, ambiguous}, + {0x00E8, 0x00EA, ambiguous}, + {0x00EC, 0x00ED, ambiguous}, + {0x00F0, 0x00F0, ambiguous}, + {0x00F2, 0x00F3, ambiguous}, + {0x00F7, 0x00FA, ambiguous}, + {0x00FC, 0x00FC, ambiguous}, + {0x00FE, 0x00FE, ambiguous}, + {0x0101, 0x0101, ambiguous}, + {0x0111, 0x0111, ambiguous}, + {0x0113, 0x0113, ambiguous}, + {0x011B, 0x011B, ambiguous}, + {0x0126, 0x0127, ambiguous}, + {0x012B, 0x012B, ambiguous}, + {0x0131, 0x0133, ambiguous}, + {0x0138, 0x0138, ambiguous}, + {0x013F, 0x0142, ambiguous}, + {0x0144, 0x0144, ambiguous}, + {0x0148, 0x014B, ambiguous}, + {0x014D, 0x014D, ambiguous}, + {0x0152, 0x0153, ambiguous}, + {0x0166, 0x0167, ambiguous}, + {0x016B, 0x016B, ambiguous}, + {0x01CE, 0x01CE, ambiguous}, + {0x01D0, 0x01D0, ambiguous}, + {0x01D2, 0x01D2, ambiguous}, + {0x01D4, 0x01D4, ambiguous}, + {0x01D6, 0x01D6, ambiguous}, + {0x01D8, 0x01D8, ambiguous}, + {0x01DA, 0x01DA, ambiguous}, + {0x01DC, 0x01DC, ambiguous}, + {0x0251, 0x0251, ambiguous}, + {0x0261, 0x0261, ambiguous}, + {0x02C4, 0x02C4, ambiguous}, + {0x02C7, 0x02C7, ambiguous}, + {0x02C9, 0x02CB, ambiguous}, + {0x02CD, 0x02CD, ambiguous}, + {0x02D0, 0x02D0, ambiguous}, + {0x02D8, 0x02DB, ambiguous}, + {0x02DD, 0x02DD, ambiguous}, + {0x02DF, 0x02DF, ambiguous}, + {0x0300, 0x036F, ambiguous}, + {0x0391, 0x03A2, ambiguous}, + {0x03A3, 0x03A9, ambiguous}, + {0x03B1, 0x03C1, ambiguous}, + {0x03C3, 0x03C9, ambiguous}, + {0x0401, 0x0401, ambiguous}, + {0x0410, 0x044F, ambiguous}, + {0x0451, 0x0451, ambiguous}, + {0x1100, 0x115F, wide}, + {0x2010, 0x2010, ambiguous}, + {0x2013, 0x2016, ambiguous}, + {0x2018, 0x2019, ambiguous}, + {0x201C, 0x201D, ambiguous}, + {0x2020, 0x2022, ambiguous}, + {0x2024, 0x2027, ambiguous}, + {0x2030, 0x2030, ambiguous}, + {0x2032, 0x2033, ambiguous}, + {0x2035, 0x2035, ambiguous}, + {0x203B, 0x203B, ambiguous}, + {0x203E, 0x203E, ambiguous}, + {0x2074, 0x2074, ambiguous}, + {0x207F, 0x207F, ambiguous}, + {0x2081, 0x2084, ambiguous}, + {0x20A9, 0x20A9, halfwidth}, + {0x20AC, 0x20AC, ambiguous}, + {0x2103, 0x2103, ambiguous}, + {0x2105, 0x2105, ambiguous}, + {0x2109, 0x2109, ambiguous}, + {0x2113, 0x2113, ambiguous}, + {0x2116, 0x2116, ambiguous}, + {0x2121, 0x2122, ambiguous}, + {0x2126, 0x2126, ambiguous}, + {0x212B, 0x212B, ambiguous}, + {0x2153, 0x2154, ambiguous}, + {0x215B, 0x215E, ambiguous}, + {0x2160, 0x216B, ambiguous}, + {0x2170, 0x2179, ambiguous}, + {0x2189, 0x218A, ambiguous}, + {0x2190, 0x2199, ambiguous}, + {0x21B8, 0x21B9, ambiguous}, + {0x21D2, 0x21D2, ambiguous}, + {0x21D4, 0x21D4, ambiguous}, + {0x21E7, 0x21E7, ambiguous}, + {0x2200, 0x2200, ambiguous}, + {0x2202, 0x2203, ambiguous}, + {0x2207, 0x2208, ambiguous}, + {0x220B, 0x220B, ambiguous}, + {0x220F, 0x220F, ambiguous}, + {0x2211, 0x2211, ambiguous}, + {0x2215, 0x2215, ambiguous}, + {0x221A, 0x221A, ambiguous}, + {0x221D, 0x2220, 
ambiguous}, + {0x2223, 0x2223, ambiguous}, + {0x2225, 0x2225, ambiguous}, + {0x2227, 0x222C, ambiguous}, + {0x222E, 0x222E, ambiguous}, + {0x2234, 0x2237, ambiguous}, + {0x223C, 0x223D, ambiguous}, + {0x2248, 0x2248, ambiguous}, + {0x224C, 0x224C, ambiguous}, + {0x2252, 0x2252, ambiguous}, + {0x2260, 0x2261, ambiguous}, + {0x2264, 0x2267, ambiguous}, + {0x226A, 0x226B, ambiguous}, + {0x226E, 0x226F, ambiguous}, + {0x2282, 0x2283, ambiguous}, + {0x2286, 0x2287, ambiguous}, + {0x2295, 0x2295, ambiguous}, + {0x2299, 0x2299, ambiguous}, + {0x22A5, 0x22A5, ambiguous}, + {0x22BF, 0x22BF, ambiguous}, + {0x2312, 0x2312, ambiguous}, + {0x2329, 0x232A, wide}, + {0x2460, 0x24E9, ambiguous}, + {0x24EB, 0x254B, ambiguous}, + {0x2550, 0x2573, ambiguous}, + {0x2580, 0x258F, ambiguous}, + {0x2592, 0x2595, ambiguous}, + {0x25A0, 0x25A1, ambiguous}, + {0x25A3, 0x25A9, ambiguous}, + {0x25B2, 0x25B3, ambiguous}, + {0x25B6, 0x25B7, ambiguous}, + {0x25BC, 0x25BD, ambiguous}, + {0x25C0, 0x25C1, ambiguous}, + {0x25C6, 0x25C8, ambiguous}, + {0x25CB, 0x25CB, ambiguous}, + {0x25CE, 0x25D1, ambiguous}, + {0x25E2, 0x25E5, ambiguous}, + {0x25EF, 0x25EF, ambiguous}, + {0x2605, 0x2606, ambiguous}, + {0x2609, 0x2609, ambiguous}, + {0x260E, 0x260F, ambiguous}, + {0x2614, 0x2615, ambiguous}, + {0x261C, 0x261C, ambiguous}, + {0x261E, 0x261E, ambiguous}, + {0x2640, 0x2640, ambiguous}, + {0x2642, 0x2642, ambiguous}, + {0x2660, 0x2661, ambiguous}, + {0x2663, 0x2665, ambiguous}, + {0x2667, 0x266A, ambiguous}, + {0x266C, 0x266D, ambiguous}, + {0x266F, 0x266F, ambiguous}, + {0x269E, 0x269F, ambiguous}, + {0x26BE, 0x26BF, ambiguous}, + {0x26C4, 0x26CD, ambiguous}, + {0x26CF, 0x26E1, ambiguous}, + {0x26E3, 0x26E3, ambiguous}, + {0x26E8, 0x26FF, ambiguous}, + {0x273D, 0x273D, ambiguous}, + {0x2757, 0x2757, ambiguous}, + {0x2776, 0x277F, ambiguous}, + {0x27E6, 0x27ED, narrow}, + {0x2985, 0x2986, narrow}, + {0x2B55, 0x2B59, ambiguous}, + {0x2E80, 0x2E9A, wide}, + {0x2E9B, 0x2EF4, wide}, + {0x2F00, 0x2FD6, wide}, + {0x2FF0, 0x2FFC, wide}, + {0x3000, 0x3000, fullwidth}, + {0x3001, 0x303E, wide}, + {0x3041, 0x3097, wide}, + {0x3099, 0x3100, wide}, + {0x3105, 0x312E, wide}, + {0x3131, 0x318F, wide}, + {0x3190, 0x31BB, wide}, + {0x31C0, 0x31E4, wide}, + {0x31F0, 0x321F, wide}, + {0x3220, 0x3247, wide}, + {0x3248, 0x324F, ambiguous}, + {0x3250, 0x32FF, wide}, + {0x3300, 0x4DBF, wide}, + {0x4E00, 0xA48D, wide}, + {0xA490, 0xA4C7, wide}, + {0xA960, 0xA97D, wide}, + {0xAC00, 0xD7A4, wide}, + {0xE000, 0xF8FF, ambiguous}, + {0xF900, 0xFAFF, wide}, + {0xFE00, 0xFE0F, ambiguous}, + {0xFE10, 0xFE1A, wide}, + {0xFE30, 0xFE53, wide}, + {0xFE54, 0xFE67, wide}, + {0xFE68, 0xFE6C, wide}, + {0xFF01, 0xFF60, fullwidth}, + {0xFF61, 0xFFBF, halfwidth}, + {0xFFC2, 0xFFC8, halfwidth}, + {0xFFCA, 0xFFD0, halfwidth}, + {0xFFD2, 0xFFD8, halfwidth}, + {0xFFDA, 0xFFDD, halfwidth}, + {0xFFE0, 0xFFE7, fullwidth}, + {0xFFE8, 0xFFEF, halfwidth}, + {0xFFFD, 0xFFFE, ambiguous}, + {0x1B000, 0x1B002, wide}, + {0x1F100, 0x1F10A, ambiguous}, + {0x1F110, 0x1F12D, ambiguous}, + {0x1F130, 0x1F169, ambiguous}, + {0x1F170, 0x1F19B, ambiguous}, + {0x1F200, 0x1F203, wide}, + {0x1F210, 0x1F23B, wide}, + {0x1F240, 0x1F249, wide}, + {0x1F250, 0x1F252, wide}, + {0x20000, 0x2FFFE, wide}, + {0x30000, 0x3FFFE, wide}, + {0xE0100, 0xE01F0, ambiguous}, + {0xF0000, 0xFFFFD, ambiguous}, + {0x100000, 0x10FFFE, ambiguous}, +} + +// Condition have flag EastAsianWidth whether the current locale is CJK or not. 
+type Condition struct { + EastAsianWidth bool +} + +// NewCondition return new instance of Condition which is current locale. +func NewCondition() *Condition { + return &Condition{EastAsianWidth} +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func (c *Condition) RuneWidth(r rune) int { + if r == 0 { + return 0 + } + if r < 32 || (r >= 0x7f && r < 0xa0) { + return 1 + } + for _, iv := range combining { + if iv.first <= r && r <= iv.last { + return 0 + } + } + + if c.EastAsianWidth && IsAmbiguousWidth(r) { + return 2 + } + + if r >= 0x1100 && + (r <= 0x115f || r == 0x2329 || r == 0x232a || + (r >= 0x2e80 && r <= 0xa4cf && r != 0x303f) || + (r >= 0xac00 && r <= 0xd7a3) || + (r >= 0xf900 && r <= 0xfaff) || + (r >= 0xfe30 && r <= 0xfe6f) || + (r >= 0xff00 && r <= 0xff60) || + (r >= 0xffe0 && r <= 0xffe6) || + (r >= 0x20000 && r <= 0x2fffd) || + (r >= 0x30000 && r <= 0x3fffd)) { + return 2 + } + return 1 +} + +// StringWidth return width as you can see +func (c *Condition) StringWidth(s string) (width int) { + for _, r := range []rune(s) { + width += c.RuneWidth(r) + } + return width +} + +// Truncate return string truncated with w cells +func (c *Condition) Truncate(s string, w int, tail string) string { + if c.StringWidth(s) <= w { + return s + } + r := []rune(s) + tw := c.StringWidth(tail) + w -= tw + width := 0 + i := 0 + for ; i < len(r); i++ { + cw := c.RuneWidth(r[i]) + if width+cw > w { + break + } + width += cw + } + return string(r[0:i]) + tail +} + +// Wrap return string wrapped with w cells +func (c *Condition) Wrap(s string, w int) string { + width := 0 + out := "" + for _, r := range []rune(s) { + cw := RuneWidth(r) + if r == '\n' { + out += string(r) + width = 0 + continue + } else if width+cw > w { + out += "\n" + width = 0 + out += string(r) + width += cw + continue + } + out += string(r) + width += cw + } + return out +} + +// FillLeft return string filled in left by spaces in w cells +func (c *Condition) FillLeft(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return string(b) + s + } + return s +} + +// FillRight return string filled in left by spaces in w cells +func (c *Condition) FillRight(s string, w int) string { + width := c.StringWidth(s) + count := w - width + if count > 0 { + b := make([]byte, count) + for i := range b { + b[i] = ' ' + } + return s + string(b) + } + return s +} + +// RuneWidth returns the number of cells in r. +// See http://www.unicode.org/reports/tr11/ +func RuneWidth(r rune) int { + return DefaultCondition.RuneWidth(r) +} + +func ct(r rune) ctype { + for _, iv := range ctypes { + if iv.first <= r && r <= iv.last { + return iv.ctype + } + } + return neutral +} + +// IsAmbiguousWidth returns whether is ambiguous width or not. +func IsAmbiguousWidth(r rune) bool { + return ct(r) == ambiguous +} + +// IsNeutralWidth returns whether is neutral width or not. 
+func IsNeutralWidth(r rune) bool { + return ct(r) == neutral +} + +// StringWidth return width as you can see +func StringWidth(s string) (width int) { + return DefaultCondition.StringWidth(s) +} + +// Truncate return string truncated with w cells +func Truncate(s string, w int, tail string) string { + return DefaultCondition.Truncate(s, w, tail) +} + +// Wrap return string wrapped with w cells +func Wrap(s string, w int) string { + return DefaultCondition.Wrap(s, w) +} + +// FillLeft return string filled in left by spaces in w cells +func FillLeft(s string, w int) string { + return DefaultCondition.FillLeft(s, w) +} + +// FillRight return string filled in left by spaces in w cells +func FillRight(s string, w int) string { + return DefaultCondition.FillRight(s, w) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go new file mode 100644 index 0000000000..0ce32c5e7b --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go @@ -0,0 +1,8 @@ +// +build js + +package runewidth + +func IsEastAsian() bool { + // TODO: Implement this for the web. Detect east asian in a compatible way, and return true. + return false +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go new file mode 100644 index 0000000000..c579e9a314 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go @@ -0,0 +1,77 @@ +// +build !windows,!js + +package runewidth + +import ( + "os" + "regexp" + "strings" +) + +var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`) + +var mblenTable = map[string]int{ + "utf-8": 6, + "utf8": 6, + "jis": 8, + "eucjp": 3, + "euckr": 2, + "euccn": 2, + "sjis": 2, + "cp932": 2, + "cp51932": 2, + "cp936": 2, + "cp949": 2, + "cp950": 2, + "big5": 2, + "gbk": 2, + "gb2312": 2, +} + +func isEastAsian(locale string) bool { + charset := strings.ToLower(locale) + r := reLoc.FindStringSubmatch(locale) + if len(r) == 2 { + charset = strings.ToLower(r[1]) + } + + if strings.HasSuffix(charset, "@cjk_narrow") { + return false + } + + for pos, b := range []byte(charset) { + if b == '@' { + charset = charset[:pos] + break + } + } + max := 1 + if m, ok := mblenTable[charset]; ok { + max = m + } + if max > 1 && (charset[0] != 'u' || + strings.HasPrefix(locale, "ja") || + strings.HasPrefix(locale, "ko") || + strings.HasPrefix(locale, "zh")) { + return true + } + return false +} + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + locale := os.Getenv("LC_CTYPE") + if locale == "" { + locale = os.Getenv("LANG") + } + + // ignore C locale + if locale == "POSIX" || locale == "C" { + return false + } + if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' 
|| locale[1] == '-') { + return false + } + + return isEastAsian(locale) +} diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go new file mode 100644 index 0000000000..0258876b99 --- /dev/null +++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go @@ -0,0 +1,25 @@ +package runewidth + +import ( + "syscall" +) + +var ( + kernel32 = syscall.NewLazyDLL("kernel32") + procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP") +) + +// IsEastAsian return true if the current locale is CJK +func IsEastAsian() bool { + r1, _, _ := procGetConsoleOutputCP.Call() + if r1 == 0 { + return false + } + + switch int(r1) { + case 932, 51932, 936, 949, 950: + return true + } + + return false +} diff --git a/vendor/github.com/mgutz/ansi/LICENSE b/vendor/github.com/mgutz/ansi/LICENSE new file mode 100644 index 0000000000..06ce0c3b51 --- /dev/null +++ b/vendor/github.com/mgutz/ansi/LICENSE @@ -0,0 +1,9 @@ +The MIT License (MIT) +Copyright (c) 2013 Mario L. Gutierrez + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/mgutz/ansi/ansi.go b/vendor/github.com/mgutz/ansi/ansi.go new file mode 100644 index 0000000000..099aee3c5c --- /dev/null +++ b/vendor/github.com/mgutz/ansi/ansi.go @@ -0,0 +1,246 @@ +package ansi + +import ( + "bytes" + "fmt" + "strconv" + "strings" +) + +const ( + black = iota + red + green + yellow + blue + magenta + cyan + white + defaultt = 9 + + normalIntensityFG = 30 + highIntensityFG = 90 + normalIntensityBG = 40 + highIntensityBG = 100 + + start = "\033[" + bold = "1;" + blink = "5;" + underline = "4;" + inverse = "7;" + + // Reset is the ANSI reset escape sequence + Reset = "\033[0m" + // DefaultBG is the default background + DefaultBG = "\033[49m" + // DefaultFG is the default foreground + DefaultFG = "\033[39m" +) + +// Black FG +var Black string + +// Red FG +var Red string + +// Green FG +var Green string + +// Yellow FG +var Yellow string + +// Blue FG +var Blue string + +// Magenta FG +var Magenta string + +// Cyan FG +var Cyan string + +// White FG +var White string + +// LightBlack FG +var LightBlack string + +// LightRed FG +var LightRed string + +// LightGreen FG +var LightGreen string + +// LightYellow FG +var LightYellow string + +// LightBlue FG +var LightBlue string + +// LightMagenta FG +var LightMagenta string + +// LightCyan FG +var LightCyan string + +// LightWhite FG +var LightWhite string + +var ( + plain = false + // Colors maps common color names to their ANSI color code. 
+ Colors = map[string]int{ + "black": black, + "red": red, + "green": green, + "yellow": yellow, + "blue": blue, + "magenta": magenta, + "cyan": cyan, + "white": white, + "default": defaultt, + } +) + +func init() { + for i := 0; i < 256; i++ { + Colors[strconv.Itoa(i)] = i + } + + Black = ColorCode("black") + Red = ColorCode("red") + Green = ColorCode("green") + Yellow = ColorCode("yellow") + Blue = ColorCode("blue") + Magenta = ColorCode("magenta") + Cyan = ColorCode("cyan") + White = ColorCode("white") + LightBlack = ColorCode("black+h") + LightRed = ColorCode("red+h") + LightGreen = ColorCode("green+h") + LightYellow = ColorCode("yellow+h") + LightBlue = ColorCode("blue+h") + LightMagenta = ColorCode("magenta+h") + LightCyan = ColorCode("cyan+h") + LightWhite = ColorCode("white+h") +} + +// ColorCode returns the ANSI color color code for style. +func ColorCode(style string) string { + return colorCode(style).String() +} + +// Gets the ANSI color code for a style. +func colorCode(style string) *bytes.Buffer { + buf := bytes.NewBufferString("") + if plain || style == "" { + return buf + } + if style == "reset" { + buf.WriteString(Reset) + return buf + } else if style == "off" { + return buf + } + + foregroundBackground := strings.Split(style, ":") + foreground := strings.Split(foregroundBackground[0], "+") + fgKey := foreground[0] + fg := Colors[fgKey] + fgStyle := "" + if len(foreground) > 1 { + fgStyle = foreground[1] + } + + bg, bgStyle := "", "" + + if len(foregroundBackground) > 1 { + background := strings.Split(foregroundBackground[1], "+") + bg = background[0] + if len(background) > 1 { + bgStyle = background[1] + } + } + + buf.WriteString(start) + base := normalIntensityFG + if len(fgStyle) > 0 { + if strings.Contains(fgStyle, "b") { + buf.WriteString(bold) + } + if strings.Contains(fgStyle, "B") { + buf.WriteString(blink) + } + if strings.Contains(fgStyle, "u") { + buf.WriteString(underline) + } + if strings.Contains(fgStyle, "i") { + buf.WriteString(inverse) + } + if strings.Contains(fgStyle, "h") { + base = highIntensityFG + } + } + + // if 256-color + n, err := strconv.Atoi(fgKey) + if err == nil { + fmt.Fprintf(buf, "38;5;%d;", n) + } else { + fmt.Fprintf(buf, "%d;", base+fg) + } + + base = normalIntensityBG + if len(bg) > 0 { + if strings.Contains(bgStyle, "h") { + base = highIntensityBG + } + // if 256-color + n, err := strconv.Atoi(bg) + if err == nil { + fmt.Fprintf(buf, "48;5;%d;", n) + } else { + fmt.Fprintf(buf, "%d;", base+Colors[bg]) + } + } + + // remove last ";" + buf.Truncate(buf.Len() - 1) + buf.WriteRune('m') + return buf +} + +// Color colors a string based on the ANSI color code for style. +func Color(s, style string) string { + if plain || len(style) < 1 { + return s + } + buf := colorCode(style) + buf.WriteString(s) + buf.WriteString(Reset) + return buf.String() +} + +// ColorFunc creates a closureto avoid ANSI color code calculation. +func ColorFunc(style string) func(string) string { + if style == "" { + return func(s string) string { + return s + } + } + color := ColorCode(style) + return func(s string) string { + if plain || s == "" { + return s + } + buf := bytes.NewBufferString(color) + buf.WriteString(s) + buf.WriteString(Reset) + result := buf.String() + return result + } +} + +// DisableColors disables ANSI color codes. On by default. 
+func DisableColors(disable bool) { + plain = disable +} diff --git a/vendor/github.com/mgutz/ansi/cmd/ansi-mgutz/main.go b/vendor/github.com/mgutz/ansi/cmd/ansi-mgutz/main.go new file mode 100644 index 0000000000..66ab77979c --- /dev/null +++ b/vendor/github.com/mgutz/ansi/cmd/ansi-mgutz/main.go @@ -0,0 +1,135 @@ +package main + +import ( + "fmt" + "sort" + "strconv" + + "github.com/mattn/go-colorable" + "github.com/mgutz/ansi" +) + +func main() { + printColors() + print256Colors() + printConstants() +} + +func pad(s string, length int) string { + for len(s) < length { + s += " " + } + return s +} + +func padColor(s string, styles []string) string { + buffer := "" + for _, style := range styles { + buffer += ansi.Color(pad(s+style, 20), s+style) + } + return buffer +} + +func printPlain() { + ansi.DisableColors(true) + bgColors := []string{ + "", + ":black", + ":red", + ":green", + ":yellow", + ":blue", + ":magenta", + ":cyan", + ":white", + } + for fg := range ansi.Colors { + for _, bg := range bgColors { + println(padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg})) + println(padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"})) + println(padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"})) + } + } +} + +func printColors() { + ansi.DisableColors(false) + stdout := colorable.NewColorableStdout() + + bgColors := []string{ + "", + ":black", + ":red", + ":green", + ":yellow", + ":blue", + ":magenta", + ":cyan", + ":white", + } + + keys := []string{} + for fg := range ansi.Colors { + _, err := strconv.Atoi(fg) + if err != nil { + keys = append(keys, fg) + } + } + sort.Strings(keys) + + for _, fg := range keys { + for _, bg := range bgColors { + fmt.Fprintln(stdout, padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg})) + fmt.Fprintln(stdout, padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"})) + fmt.Fprintln(stdout, padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"})) + } + } +} + +func print256Colors() { + ansi.DisableColors(false) + stdout := colorable.NewColorableStdout() + + bgColors := []string{""} + for i := 0; i < 256; i++ { + key := fmt.Sprintf(":%d", i) + bgColors = append(bgColors, key) + } + + keys := []string{} + for fg := range ansi.Colors { + n, err := strconv.Atoi(fg) + if err == nil { + keys = append(keys, fmt.Sprintf("%3d", n)) + } + } + sort.Strings(keys) + + for _, fg := range keys { + for _, bg := range bgColors { + fmt.Fprintln(stdout, padColor(fg, []string{"" + bg, "+b" + bg, "+u" + bg})) + fmt.Fprintln(stdout, padColor(fg, []string{"+B" + bg, "+Bb" + bg})) + } + } +} + +func printConstants() { + stdout := colorable.NewColorableStdout() + fmt.Fprintln(stdout, ansi.DefaultFG, "ansi.DefaultFG", ansi.Reset) + fmt.Fprintln(stdout, ansi.Black, "ansi.Black", ansi.Reset) + fmt.Fprintln(stdout, ansi.Red, "ansi.Red", ansi.Reset) + fmt.Fprintln(stdout, ansi.Green, "ansi.Green", ansi.Reset) + fmt.Fprintln(stdout, ansi.Yellow, "ansi.Yellow", ansi.Reset) + fmt.Fprintln(stdout, ansi.Blue, "ansi.Blue", ansi.Reset) + fmt.Fprintln(stdout, ansi.Magenta, "ansi.Magenta", ansi.Reset) + fmt.Fprintln(stdout, ansi.Cyan, "ansi.Cyan", ansi.Reset) + fmt.Fprintln(stdout, ansi.White, "ansi.White", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightBlack, "ansi.LightBlack", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightRed, "ansi.LightRed", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightGreen, "ansi.LightGreen", 
ansi.Reset) + fmt.Fprintln(stdout, ansi.LightYellow, "ansi.LightYellow", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightBlue, "ansi.LightBlue", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightMagenta, "ansi.LightMagenta", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightCyan, "ansi.LightCyan", ansi.Reset) + fmt.Fprintln(stdout, ansi.LightWhite, "ansi.LightWhite", ansi.Reset) +} diff --git a/vendor/github.com/mgutz/ansi/doc.go b/vendor/github.com/mgutz/ansi/doc.go new file mode 100644 index 0000000000..43c217e11d --- /dev/null +++ b/vendor/github.com/mgutz/ansi/doc.go @@ -0,0 +1,65 @@ +/* +Package ansi is a small, fast library to create ANSI colored strings and codes. + +Installation + + # this installs the color viewer and the package + go get -u github.com/mgutz/ansi/cmd/ansi-mgutz + +Example + + // colorize a string, SLOW + msg := ansi.Color("foo", "red+b:white") + + // create a closure to avoid recalculating ANSI code compilation + phosphorize := ansi.ColorFunc("green+h:black") + msg = phosphorize("Bring back the 80s!") + msg2 := phospohorize("Look, I'm a CRT!") + + // cache escape codes and build strings manually + lime := ansi.ColorCode("green+h:black") + reset := ansi.ColorCode("reset") + + fmt.Println(lime, "Bring back the 80s!", reset) + +Other examples + + Color(s, "red") // red + Color(s, "red+b") // red bold + Color(s, "red+B") // red blinking + Color(s, "red+u") // red underline + Color(s, "red+bh") // red bold bright + Color(s, "red:white") // red on white + Color(s, "red+b:white+h") // red bold on white bright + Color(s, "red+B:white+h") // red blink on white bright + +To view color combinations, from terminal + + ansi-mgutz + +Style format + + "foregroundColor+attributes:backgroundColor+attributes" + +Colors + + black + red + green + yellow + blue + magenta + cyan + white + +Attributes + + b = bold foreground + B = Blink foreground + u = underline foreground + h = high intensity (bright) foreground, background + i = inverse + +Wikipedia ANSI escape codes [Colors](http://en.wikipedia.org/wiki/ANSI_escape_code#Colors) +*/ +package ansi diff --git a/vendor/github.com/mgutz/ansi/print.go b/vendor/github.com/mgutz/ansi/print.go new file mode 100644 index 0000000000..044c9a684b --- /dev/null +++ b/vendor/github.com/mgutz/ansi/print.go @@ -0,0 +1,42 @@ +package ansi + +// PrintStyles prints all style combinations to the terminal. 
+func PrintStyles() { + oldPlain := plain + plain = false + + bgColors := []string{ + "", + ":black", + ":red", + ":green", + ":yellow", + ":blue", + ":magenta", + ":cyan", + ":white", + } + for fg := range Colors { + for _, bg := range bgColors { + println(padColor(fg, []string{"" + bg, "+b" + bg, "+bh" + bg, "+u" + bg})) + println(padColor(fg, []string{"+uh" + bg, "+B" + bg, "+Bb" + bg /* backgrounds */, "" + bg + "+h"})) + println(padColor(fg, []string{"+b" + bg + "+h", "+bh" + bg + "+h", "+u" + bg + "+h", "+uh" + bg + "+h"})) + } + } + plain = oldPlain +} + +func pad(s string, length int) string { + for len(s) < length { + s += " " + } + return s +} + +func padColor(s string, styles []string) string { + buffer := "" + for _, style := range styles { + buffer += Color(pad(s+style, 20), s+style) + } + return buffer +} diff --git a/vendor/github.com/olekukonko/tablewriter/LICENCE.md b/vendor/github.com/olekukonko/tablewriter/LICENCE.md new file mode 100644 index 0000000000..1fd8484253 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/LICENCE.md @@ -0,0 +1,19 @@ +Copyright (C) 2014 by Oleku Konko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/olekukonko/tablewriter/csv.go b/vendor/github.com/olekukonko/tablewriter/csv.go new file mode 100644 index 0000000000..98878303bc --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/csv.go @@ -0,0 +1,52 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. 
+// The protocols were written in pure Go and works on windows and unix systems + +package tablewriter + +import ( + "encoding/csv" + "io" + "os" +) + +// Start A new table by importing from a CSV file +// Takes io.Writer and csv File name +func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) { + file, err := os.Open(fileName) + if err != nil { + return &Table{}, err + } + defer file.Close() + csvReader := csv.NewReader(file) + t, err := NewCSVReader(writer, csvReader, hasHeader) + return t, err +} + +// Start a New Table Writer with csv.Reader +// This enables customisation such as reader.Comma = ';' +// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94 +func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) { + t := NewWriter(writer) + if hasHeader { + // Read the first row + headers, err := csvReader.Read() + if err != nil { + return &Table{}, err + } + t.SetHeader(headers) + } + for { + record, err := csvReader.Read() + if err == io.EOF { + break + } else if err != nil { + return &Table{}, err + } + t.Append(record) + } + return t, nil +} diff --git a/vendor/github.com/olekukonko/tablewriter/csv2table/csv2table.go b/vendor/github.com/olekukonko/tablewriter/csv2table/csv2table.go new file mode 100644 index 0000000000..a4e9b13816 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/csv2table/csv2table.go @@ -0,0 +1,84 @@ +package main + +import ( + "encoding/csv" + "flag" + "fmt" + "github.com/olekukonko/tablewriter" + "io" + "os" + "unicode/utf8" +) + +var ( + fileName = flag.String("f", "", "Set file with eg. sample.csv") + delimiter = flag.String("d", ",", "Set CSV File delimiter eg. ,|;|\t ") + header = flag.Bool("h", true, "Set header options eg. true|false ") + align = flag.String("a", "none", "Set aligmement with eg. none|left|right|center") + pipe = flag.Bool("p", false, "Suport for Piping from STDIN") + border = flag.Bool("b", true, "Enable / disable table border") +) + +func main() { + flag.Parse() + fmt.Println() + if *pipe || hasArg("-p") { + process(os.Stdin) + } else { + if *fileName == "" { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + flag.PrintDefaults() + fmt.Println() + os.Exit(1) + } + processFile() + } + fmt.Println() +} + +func hasArg(name string) bool { + for _ , v := range os.Args { + if name == v { + return true + } + } + return false +} +func processFile() { + r, err := os.Open(*fileName) + if err != nil { + exit(err) + } + defer r.Close() + process(r) +} +func process(r io.Reader) { + csvReader := csv.NewReader(r) + rune, size := utf8.DecodeRuneInString(*delimiter) + if size == 0 { + rune = ',' + } + csvReader.Comma = rune + + table, err := tablewriter.NewCSVReader(os.Stdout, csvReader, *header) + + if err != nil { + exit(err) + } + + switch *align { + case "left": + table.SetAlignment(tablewriter.ALIGN_LEFT) + case "right": + table.SetAlignment(tablewriter.ALIGN_RIGHT) + case "center": + table.SetAlignment(tablewriter.ALIGN_CENTER) + } + table.SetBorder(*border) + table.Render() +} + +func exit(err error) { + fmt.Fprintf(os.Stderr, "#Error : %s", err) + os.Exit(1) +} diff --git a/vendor/github.com/olekukonko/tablewriter/table.go b/vendor/github.com/olekukonko/tablewriter/table.go new file mode 100644 index 0000000000..68cb46d903 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/table.go @@ -0,0 +1,529 @@ +// Copyright 2014 Oleku Konko All rights reserved. 
+// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. +// The protocols were written in pure Go and works on windows and unix systems + +// Create & Generate text based table +package tablewriter + +import ( + "fmt" + "io" + "regexp" + "strings" +) + +const ( + MAX_ROW_WIDTH = 30 +) + +const ( + CENTER = "+" + ROW = "-" + COLUMN = "|" + SPACE = " " + NEWLINE = "\n" +) + +const ( + ALIGN_DEFAULT = iota + ALIGN_CENTER + ALIGN_RIGHT + ALIGN_LEFT +) + +var ( + decimal = regexp.MustCompile(`^-*\d*\.?\d*$`) + percent = regexp.MustCompile(`^-*\d*\.?\d*$%$`) +) + +type Border struct { + Left bool + Right bool + Top bool + Bottom bool +} + +type Table struct { + out io.Writer + rows [][]string + lines [][][]string + cs map[int]int + rs map[int]int + headers []string + footers []string + autoFmt bool + autoWrap bool + mW int + pCenter string + pRow string + pColumn string + tColumn int + tRow int + hAlign int + fAlign int + align int + newLine string + rowLine bool + hdrLine bool + borders Border + colSize int +} + +// Start New Table +// Take io.Writer Directly +func NewWriter(writer io.Writer) *Table { + t := &Table{ + out: writer, + rows: [][]string{}, + lines: [][][]string{}, + cs: make(map[int]int), + rs: make(map[int]int), + headers: []string{}, + footers: []string{}, + autoFmt: true, + autoWrap: true, + mW: MAX_ROW_WIDTH, + pCenter: CENTER, + pRow: ROW, + pColumn: COLUMN, + tColumn: -1, + tRow: -1, + hAlign: ALIGN_DEFAULT, + fAlign: ALIGN_DEFAULT, + align: ALIGN_DEFAULT, + newLine: NEWLINE, + rowLine: false, + hdrLine: true, + borders: Border{Left: true, Right: true, Bottom: true, Top: true}, + colSize: -1} + return t +} + +// Render table output +func (t Table) Render() { + if t.borders.Top { + t.printLine(true) + } + t.printHeading() + t.printRows() + + if !t.rowLine && t.borders.Bottom { + t.printLine(true) + } + t.printFooter() + +} + +// Set table header +func (t *Table) SetHeader(keys []string) { + t.colSize = len(keys) + for i, v := range keys { + t.parseDimension(v, i, -1) + t.headers = append(t.headers, v) + } +} + +// Set table Footer +func (t *Table) SetFooter(keys []string) { + //t.colSize = len(keys) + for i, v := range keys { + t.parseDimension(v, i, -1) + t.footers = append(t.footers, v) + } +} + +// Turn header autoformatting on/off. Default is on (true). +func (t *Table) SetAutoFormatHeaders(auto bool) { + t.autoFmt = auto +} + +// Turn automatic multiline text adjustment on/off. Default is on (true). 
+func (t *Table) SetAutoWrapText(auto bool) { + t.autoWrap = auto +} + +// Set the Default column width +func (t *Table) SetColWidth(width int) { + t.mW = width +} + +// Set the Column Separator +func (t *Table) SetColumnSeparator(sep string) { + t.pColumn = sep +} + +// Set the Row Separator +func (t *Table) SetRowSeparator(sep string) { + t.pRow = sep +} + +// Set the center Separator +func (t *Table) SetCenterSeparator(sep string) { + t.pCenter = sep +} + +// Set Header Alignment +func (t *Table) SetHeaderAlignment(hAlign int) { + t.hAlign = hAlign +} + +// Set Footer Alignment +func (t *Table) SetFooterAlignment(fAlign int) { + t.fAlign = fAlign +} + +// Set Table Alignment +func (t *Table) SetAlignment(align int) { + t.align = align +} + +// Set New Line +func (t *Table) SetNewLine(nl string) { + t.newLine = nl +} + +// Set Header Line +// This would enable / disable a line after the header +func (t *Table) SetHeaderLine(line bool) { + t.hdrLine = line +} + +// Set Row Line +// This would enable / disable a line on each row of the table +func (t *Table) SetRowLine(line bool) { + t.rowLine = line +} + +// Set Table Border +// This would enable / disable line around the table +func (t *Table) SetBorder(border bool) { + t.SetBorders(Border{border, border, border, border}) +} + +func (t *Table) SetBorders(border Border) { + t.borders = border +} + +// Append row to table +func (t *Table) Append(row []string) { + rowSize := len(t.headers) + if rowSize > t.colSize { + t.colSize = rowSize + } + + n := len(t.lines) + line := [][]string{} + for i, v := range row { + + // Detect string width + // Detect String height + // Break strings into words + out := t.parseDimension(v, i, n) + + // Append broken words + line = append(line, out) + } + t.lines = append(t.lines, line) +} + +// Allow Support for Bulk Append +// Eliminates repeated for loops +func (t *Table) AppendBulk(rows [][]string) { + for _, row := range rows { + t.Append(row) + } +} + +// Print line based on row width +func (t Table) printLine(nl bool) { + fmt.Fprint(t.out, t.pCenter) + for i := 0; i < len(t.cs); i++ { + v := t.cs[i] + fmt.Fprintf(t.out, "%s%s%s%s", + t.pRow, + strings.Repeat(string(t.pRow), v), + t.pRow, + t.pCenter) + } + if nl { + fmt.Fprint(t.out, t.newLine) + } +} + +// Return the PadRight function if align is left, PadLeft if align is right, +// and Pad by default +func pad(align int) func(string, string, int) string { + padFunc := Pad + switch align { + case ALIGN_LEFT: + padFunc = PadRight + case ALIGN_RIGHT: + padFunc = PadLeft + } + return padFunc +} + +// Print heading information +func (t Table) printHeading() { + // Check if headers is available + if len(t.headers) < 1 { + return + } + + // Check if border is set + // Replace with space if not set + fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) + + // Identify last column + end := len(t.cs) - 1 + + // Get pad function + padFunc := pad(t.hAlign) + + // Print Heading column + for i := 0; i <= end; i++ { + v := t.cs[i] + h := t.headers[i] + if t.autoFmt { + h = Title(h) + } + pad := ConditionString((i == end && !t.borders.Left), SPACE, t.pColumn) + fmt.Fprintf(t.out, " %s %s", + padFunc(h, SPACE, v), + pad) + } + // Next line + fmt.Fprint(t.out, t.newLine) + if t.hdrLine { + t.printLine(true) + } +} + +// Print heading information +func (t Table) printFooter() { + // Check if headers is available + if len(t.footers) < 1 { + return + } + + // Only print line if border is not set + if !t.borders.Bottom { + t.printLine(true) + } + // Check 
if border is set + // Replace with space if not set + fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE)) + + // Identify last column + end := len(t.cs) - 1 + + // Get pad function + padFunc := pad(t.fAlign) + + // Print Heading column + for i := 0; i <= end; i++ { + v := t.cs[i] + f := t.footers[i] + if t.autoFmt { + f = Title(f) + } + pad := ConditionString((i == end && !t.borders.Top), SPACE, t.pColumn) + + if len(t.footers[i]) == 0 { + pad = SPACE + } + fmt.Fprintf(t.out, " %s %s", + padFunc(f, SPACE, v), + pad) + } + // Next line + fmt.Fprint(t.out, t.newLine) + //t.printLine(true) + + hasPrinted := false + + for i := 0; i <= end; i++ { + v := t.cs[i] + pad := t.pRow + center := t.pCenter + length := len(t.footers[i]) + + if length > 0 { + hasPrinted = true + } + + // Set center to be space if length is 0 + if length == 0 && !t.borders.Right { + center = SPACE + } + + // Print first junction + if i == 0 { + fmt.Fprint(t.out, center) + } + + // Pad With space of length is 0 + if length == 0 { + pad = SPACE + } + // Ignore left space of it has printed before + if hasPrinted || t.borders.Left { + pad = t.pRow + center = t.pCenter + } + + // Change Center start position + if center == SPACE { + if i < end && len(t.footers[i+1]) != 0 { + center = t.pCenter + } + } + + // Print the footer + fmt.Fprintf(t.out, "%s%s%s%s", + pad, + strings.Repeat(string(pad), v), + pad, + center) + + } + + fmt.Fprint(t.out, t.newLine) + +} + +func (t Table) printRows() { + for i, lines := range t.lines { + t.printRow(lines, i) + } + +} + +// Print Row Information +// Adjust column alignment based on type + +func (t Table) printRow(columns [][]string, colKey int) { + // Get Maximum Height + max := t.rs[colKey] + total := len(columns) + + // TODO Fix uneven col size + // if total < t.colSize { + // for n := t.colSize - total; n < t.colSize ; n++ { + // columns = append(columns, []string{SPACE}) + // t.cs[n] = t.mW + // } + //} + + // Pad Each Height + // pads := []int{} + pads := []int{} + + for i, line := range columns { + length := len(line) + pad := max - length + pads = append(pads, pad) + for n := 0; n < pad; n++ { + columns[i] = append(columns[i], " ") + } + } + //fmt.Println(max, "\n") + for x := 0; x < max; x++ { + for y := 0; y < total; y++ { + + // Check if border is set + fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn)) + + fmt.Fprintf(t.out, SPACE) + str := columns[y][x] + + // This would print alignment + // Default alignment would use multiple configuration + switch t.align { + case ALIGN_CENTER: // + fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) + case ALIGN_RIGHT: + fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) + case ALIGN_LEFT: + fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) + default: + if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) { + fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y])) + } else { + fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) + + // TODO Custom alignment per column + //if max == 1 || pads[y] > 0 { + // fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y])) + //} else { + // fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y])) + //} + + } + } + fmt.Fprintf(t.out, SPACE) + } + // Check if border is set + // Replace with space if not set + fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE)) + fmt.Fprint(t.out, t.newLine) + } + + if t.rowLine { + t.printLine(true) + } + +} + +func (t *Table) parseDimension(str string, colKey, 
rowKey int) []string { + var ( + raw []string + max int + ) + w := DisplayWidth(str) + // Calculate Width + // Check if with is grater than maximum width + if w > t.mW { + w = t.mW + } + + // Check if width exists + v, ok := t.cs[colKey] + if !ok || v < w || v == 0 { + t.cs[colKey] = w + } + + if rowKey == -1 { + return raw + } + // Calculate Height + if t.autoWrap { + raw, _ = WrapString(str, t.cs[colKey]) + } else { + raw = getLines(str) + } + + for _, line := range raw { + if w := DisplayWidth(line); w > max { + max = w + } + } + + // Make sure the with is the same length as maximum word + // Important for cases where the width is smaller than maxu word + if max > t.cs[colKey] { + t.cs[colKey] = max + } + + h := len(raw) + v, ok = t.rs[rowKey] + + if !ok || v < h || v == 0 { + t.rs[rowKey] = h + } + //fmt.Printf("Raw %+v %d\n", raw, len(raw)) + return raw +} diff --git a/vendor/github.com/olekukonko/tablewriter/util.go b/vendor/github.com/olekukonko/tablewriter/util.go new file mode 100644 index 0000000000..2deefbc52a --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/util.go @@ -0,0 +1,72 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. +// The protocols were written in pure Go and works on windows and unix systems + +package tablewriter + +import ( + "math" + "regexp" + "strings" + + "github.com/mattn/go-runewidth" +) + +var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]") + +func DisplayWidth(str string) int { + return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, "")) +} + +// Simple Condition for string +// Returns value based on condition +func ConditionString(cond bool, valid, inValid string) string { + if cond { + return valid + } + return inValid +} + +// Format Table Header +// Replace _ , . and spaces +func Title(name string) string { + name = strings.Replace(name, "_", " ", -1) + name = strings.Replace(name, ".", " ", -1) + name = strings.TrimSpace(name) + return strings.ToUpper(name) +} + +// Pad String +// Attempts to play string in the center +func Pad(s, pad string, width int) string { + gap := width - DisplayWidth(s) + if gap > 0 { + gapLeft := int(math.Ceil(float64(gap / 2))) + gapRight := gap - gapLeft + return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight) + } + return s +} + +// Pad String Right position +// This would pace string at the left side fo the screen +func PadRight(s, pad string, width int) string { + gap := width - DisplayWidth(s) + if gap > 0 { + return s + strings.Repeat(string(pad), gap) + } + return s +} + +// Pad String Left position +// This would pace string at the right side fo the screen +func PadLeft(s, pad string, width int) string { + gap := width - DisplayWidth(s) + if gap > 0 { + return strings.Repeat(string(pad), gap) + s + } + return s +} diff --git a/vendor/github.com/olekukonko/tablewriter/wrap.go b/vendor/github.com/olekukonko/tablewriter/wrap.go new file mode 100644 index 0000000000..f3747d9f32 --- /dev/null +++ b/vendor/github.com/olekukonko/tablewriter/wrap.go @@ -0,0 +1,103 @@ +// Copyright 2014 Oleku Konko All rights reserved. +// Use of this source code is governed by a MIT +// license that can be found in the LICENSE file. + +// This module is a Table Writer API for the Go Programming Language. 
+// The protocols were written in pure Go and works on windows and unix systems + +package tablewriter + +import ( + "math" + "strings" + "unicode/utf8" +) + +var ( + nl = "\n" + sp = " " +) + +const defaultPenalty = 1e5 + +// Wrap wraps s into a paragraph of lines of length lim, with minimal +// raggedness. +func WrapString(s string, lim int) ([]string, int) { + words := strings.Split(strings.Replace(strings.TrimSpace(s), nl, sp, -1), sp) + var lines []string + max := 0 + for _, v := range words { + max = len(v) + if max > lim { + lim = max + } + } + for _, line := range WrapWords(words, 1, lim, defaultPenalty) { + lines = append(lines, strings.Join(line, sp)) + } + return lines, lim +} + +// WrapWords is the low-level line-breaking algorithm, useful if you need more +// control over the details of the text wrapping process. For most uses, +// WrapString will be sufficient and more convenient. +// +// WrapWords splits a list of words into lines with minimal "raggedness", +// treating each rune as one unit, accounting for spc units between adjacent +// words on each line, and attempting to limit lines to lim units. Raggedness +// is the total error over all lines, where error is the square of the +// difference of the length of the line and lim. Too-long lines (which only +// happen when a single word is longer than lim units) have pen penalty units +// added to the error. +func WrapWords(words []string, spc, lim, pen int) [][]string { + n := len(words) + + length := make([][]int, n) + for i := 0; i < n; i++ { + length[i] = make([]int, n) + length[i][i] = utf8.RuneCountInString(words[i]) + for j := i + 1; j < n; j++ { + length[i][j] = length[i][j-1] + spc + utf8.RuneCountInString(words[j]) + } + } + nbrk := make([]int, n) + cost := make([]int, n) + for i := range cost { + cost[i] = math.MaxInt32 + } + for i := n - 1; i >= 0; i-- { + if length[i][n-1] <= lim { + cost[i] = 0 + nbrk[i] = n + } else { + for j := i + 1; j < n; j++ { + d := lim - length[i][j-1] + c := d*d + cost[j] + if length[i][j-1] > lim { + c += pen // too-long lines get a worse penalty + } + if c < cost[i] { + cost[i] = c + nbrk[i] = j + } + } + } + } + var lines [][]string + i := 0 + for i < n { + lines = append(lines, words[i:nbrk[i]]) + i = nbrk[i] + } + return lines +} + +// getLines decomposes a multiline string into a slice of strings. +func getLines(s string) []string { + var lines []string + + for _, line := range strings.Split(strings.TrimSpace(s), nl) { + lines = append(lines, line) + } + return lines +} diff --git a/vendor/github.com/philhofer/fwd/LICENSE.md b/vendor/github.com/philhofer/fwd/LICENSE.md new file mode 100644 index 0000000000..1ac6a81f6a --- /dev/null +++ b/vendor/github.com/philhofer/fwd/LICENSE.md @@ -0,0 +1,7 @@ +Copyright (c) 2014-2015, Philip Hofer + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/philhofer/fwd/reader.go b/vendor/github.com/philhofer/fwd/reader.go new file mode 100644 index 0000000000..e591910306 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/reader.go @@ -0,0 +1,379 @@ +// The `fwd` package provides a buffered reader +// and writer. Each has methods that help improve +// the encoding/decoding performance of some binary +// protocols. +// +// The `fwd.Writer` and `fwd.Reader` type provide similar +// functionality to their counterparts in `bufio`, plus +// a few extra utility methods that simplify read-ahead +// and write-ahead. I wrote this package to improve serialization +// performance for http://github.com/tinylib/msgp, +// where it provided about a 2x speedup over `bufio` for certain +// workloads. However, care must be taken to understand the semantics of the +// extra methods provided by this package, as they allow +// the user to access and manipulate the buffer memory +// directly. +// +// The extra methods for `fwd.Reader` are `Peek`, `Skip` +// and `Next`. `(*fwd.Reader).Peek`, unlike `(*bufio.Reader).Peek`, +// will re-allocate the read buffer in order to accommodate arbitrarily +// large read-ahead. `(*fwd.Reader).Skip` skips the next `n` bytes +// in the stream, and uses the `io.Seeker` interface if the underlying +// stream implements it. `(*fwd.Reader).Next` returns a slice pointing +// to the next `n` bytes in the read buffer (like `Peek`), but also +// increments the read position. This allows users to process streams +// in arbitrary block sizes without having to manage appropriately-sized +// slices. Additionally, obviating the need to copy the data from the +// buffer to another location in memory can improve performance dramatically +// in CPU-bound applications. +// +// `fwd.Writer` only has one extra method, which is `(*fwd.Writer).Next`, which +// returns a slice pointing to the next `n` bytes of the writer, and increments +// the write position by the length of the returned slice. This allows users +// to write directly to the end of the buffer. 
+// +package fwd + +import "io" + +const ( + // DefaultReaderSize is the default size of the read buffer + DefaultReaderSize = 2048 + + // minimum read buffer; straight from bufio + minReaderSize = 16 +) + +// NewReader returns a new *Reader that reads from 'r' +func NewReader(r io.Reader) *Reader { + return NewReaderSize(r, DefaultReaderSize) +} + +// NewReaderSize returns a new *Reader that +// reads from 'r' and has a buffer size 'n' +func NewReaderSize(r io.Reader, n int) *Reader { + rd := &Reader{ + r: r, + data: make([]byte, 0, max(minReaderSize, n)), + } + if s, ok := r.(io.Seeker); ok { + rd.rs = s + } + return rd +} + +// Reader is a buffered look-ahead reader +type Reader struct { + r io.Reader // underlying reader + + // data[n:len(data)] is buffered data; data[len(data):cap(data)] is free buffer space + data []byte // data + n int // read offset + state error // last read error + + // if the reader past to NewReader was + // also an io.Seeker, this is non-nil + rs io.Seeker +} + +// Reset resets the underlying reader +// and the read buffer. +func (r *Reader) Reset(rd io.Reader) { + r.r = rd + r.data = r.data[0:0] + r.n = 0 + r.state = nil + if s, ok := rd.(io.Seeker); ok { + r.rs = s + } else { + r.rs = nil + } +} + +// more() does one read on the underlying reader +func (r *Reader) more() { + // move data backwards so that + // the read offset is 0; this way + // we can supply the maximum number of + // bytes to the reader + if r.n != 0 { + if r.n < len(r.data) { + r.data = r.data[:copy(r.data[0:], r.data[r.n:])] + } else { + r.data = r.data[:0] + } + r.n = 0 + } + var a int + a, r.state = r.r.Read(r.data[len(r.data):cap(r.data)]) + if a == 0 && r.state == nil { + r.state = io.ErrNoProgress + return + } + r.data = r.data[:len(r.data)+a] +} + +// pop error +func (r *Reader) err() (e error) { + e, r.state = r.state, nil + return +} + +// pop error; EOF -> io.ErrUnexpectedEOF +func (r *Reader) noEOF() (e error) { + e, r.state = r.state, nil + if e == io.EOF { + e = io.ErrUnexpectedEOF + } + return +} + +// buffered bytes +func (r *Reader) buffered() int { return len(r.data) - r.n } + +// Buffered returns the number of bytes currently in the buffer +func (r *Reader) Buffered() int { return len(r.data) - r.n } + +// BufferSize returns the total size of the buffer +func (r *Reader) BufferSize() int { return cap(r.data) } + +// Peek returns the next 'n' buffered bytes, +// reading from the underlying reader if necessary. +// It will only return a slice shorter than 'n' bytes +// if it also returns an error. Peek does not advance +// the reader. EOF errors are *not* returned as +// io.ErrUnexpectedEOF. +func (r *Reader) Peek(n int) ([]byte, error) { + // in the degenerate case, + // we may need to realloc + // (the caller asked for more + // bytes than the size of the buffer) + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // keep filling until + // we hit an error or + // read enough bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + // we must have hit an error + if r.buffered() < n { + return r.data[r.n:], r.err() + } + + return r.data[r.n : r.n+n], nil +} + +// Skip moves the reader forward 'n' bytes. +// Returns the number of bytes skipped and any +// errors encountered. It is analogous to Seek(n, 1). +// If the underlying reader implements io.Seeker, then +// that method will be used to skip forward. 
+// +// If the reader encounters +// an EOF before skipping 'n' bytes, it +// returns io.ErrUnexpectedEOF. If the +// underlying reader implements io.Seeker, then +// those rules apply instead. (Many implementations +// will not return `io.EOF` until the next call +// to Read.) +func (r *Reader) Skip(n int) (int, error) { + + // fast path + if r.buffered() >= n { + r.n += n + return n, nil + } + + // use seeker implementation + // if we can + if r.rs != nil { + return r.skipSeek(n) + } + + // loop on filling + // and then erasing + o := n + for r.buffered() < n && r.state == nil { + r.more() + // we can skip forward + // up to r.buffered() bytes + step := min(r.buffered(), n) + r.n += step + n -= step + } + // at this point, n should be + // 0 if everything went smoothly + return o - n, r.noEOF() +} + +// Next returns the next 'n' bytes in the stream. +// Unlike Peek, Next advances the reader position. +// The returned bytes point to the same +// data as the buffer, so the slice is +// only valid until the next reader method call. +// An EOF is considered an unexpected error. +// If an the returned slice is less than the +// length asked for, an error will be returned, +// and the reader position will not be incremented. +func (r *Reader) Next(n int) ([]byte, error) { + + // in case the buffer is too small + if cap(r.data) < n { + old := r.data[r.n:] + r.data = make([]byte, n+r.buffered()) + r.data = r.data[:copy(r.data, old)] + r.n = 0 + } + + // fill at least 'n' bytes + for r.buffered() < n && r.state == nil { + r.more() + } + + if r.buffered() < n { + return r.data[r.n:], r.noEOF() + } + out := r.data[r.n : r.n+n] + r.n += n + return out, nil +} + +// skipSeek uses the io.Seeker to seek forward. +// only call this function when n > r.buffered() +func (r *Reader) skipSeek(n int) (int, error) { + o := r.buffered() + // first, clear buffer + n -= o + r.n = 0 + r.data = r.data[:0] + + // then seek forward remaning bytes + i, err := r.rs.Seek(int64(n), 1) + return int(i) + o, err +} + +// Read implements `io.Reader` +func (r *Reader) Read(b []byte) (int, error) { + // if we have data in the buffer, just + // return that. + if r.buffered() != 0 { + x := copy(b, r.data[r.n:]) + r.n += x + return x, nil + } + var n int + // we have no buffered data; determine + // whether or not to buffer or call + // the underlying reader directly + if len(b) >= cap(r.data) { + n, r.state = r.r.Read(b) + } else { + r.more() + n = copy(b, r.data) + r.n = n + } + if n == 0 { + return 0, r.err() + } + return n, nil +} + +// ReadFull attempts to read len(b) bytes into +// 'b'. It returns the number of bytes read into +// 'b', and an error if it does not return len(b). +// EOF is considered an unexpected error. +func (r *Reader) ReadFull(b []byte) (int, error) { + var n int // read into b + var nn int // scratch + l := len(b) + // either read buffered data, + // or read directly for the underlying + // buffer, or fetch more buffered data. 
+ for n < l && r.state == nil { + if r.buffered() != 0 { + nn = copy(b[n:], r.data[r.n:]) + n += nn + r.n += nn + } else if l-n > cap(r.data) { + nn, r.state = r.r.Read(b[n:]) + n += nn + } else { + r.more() + } + } + if n < l { + return n, r.noEOF() + } + return n, nil +} + +// ReadByte implements `io.ByteReader` +func (r *Reader) ReadByte() (byte, error) { + for r.buffered() < 1 && r.state == nil { + r.more() + } + if r.buffered() < 1 { + return 0, r.err() + } + b := r.data[r.n] + r.n++ + return b, nil +} + +// WriteTo implements `io.WriterTo` +func (r *Reader) WriteTo(w io.Writer) (int64, error) { + var ( + i int64 + ii int + err error + ) + // first, clear buffer + if r.buffered() > 0 { + ii, err = w.Write(r.data[r.n:]) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + for r.state == nil { + // here we just do + // 1:1 reads and writes + r.more() + if r.buffered() > 0 { + ii, err = w.Write(r.data) + i += int64(ii) + if err != nil { + return i, err + } + r.data = r.data[0:0] + r.n = 0 + } + } + if r.state != io.EOF { + return i, r.err() + } + return i, nil +} + +func min(a int, b int) int { + if a < b { + return a + } + return b +} + +func max(a int, b int) int { + if a < b { + return b + } + return a +} diff --git a/vendor/github.com/philhofer/fwd/writer.go b/vendor/github.com/philhofer/fwd/writer.go new file mode 100644 index 0000000000..2dc392a91b --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer.go @@ -0,0 +1,224 @@ +package fwd + +import "io" + +const ( + // DefaultWriterSize is the + // default write buffer size. + DefaultWriterSize = 2048 + + minWriterSize = minReaderSize +) + +// Writer is a buffered writer +type Writer struct { + w io.Writer // writer + buf []byte // 0:len(buf) is bufered data +} + +// NewWriter returns a new writer +// that writes to 'w' and has a buffer +// that is `DefaultWriterSize` bytes. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, DefaultWriterSize), + } +} + +// NewWriterSize returns a new writer +// that writes to 'w' and has a buffer +// that is 'size' bytes. +func NewWriterSize(w io.Writer, size int) *Writer { + if wr, ok := w.(*Writer); ok && cap(wr.buf) >= size { + return wr + } + return &Writer{ + w: w, + buf: make([]byte, 0, max(size, minWriterSize)), + } +} + +// Buffered returns the number of buffered bytes +// in the reader. +func (w *Writer) Buffered() int { return len(w.buf) } + +// BufferSize returns the maximum size of the buffer. +func (w *Writer) BufferSize() int { return cap(w.buf) } + +// Flush flushes any buffered bytes +// to the underlying writer. +func (w *Writer) Flush() error { + l := len(w.buf) + if l > 0 { + n, err := w.w.Write(w.buf) + + // if we didn't write the whole + // thing, copy the unwritten + // bytes to the beginnning of the + // buffer. 
+ if n < l && n > 0 { + w.pushback(n) + if err == nil { + err = io.ErrShortWrite + } + } + if err != nil { + return err + } + w.buf = w.buf[:0] + return nil + } + return nil +} + +// Write implements `io.Writer` +func (w *Writer) Write(p []byte) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(p) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + if c < ln { + return w.w.Write(p) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], p), nil +} + +// WriteString is analogous to Write, but it takes a string. +func (w *Writer) WriteString(s string) (int, error) { + c, l, ln := cap(w.buf), len(w.buf), len(s) + avail := c - l + + // requires flush + if avail < ln { + if err := w.Flush(); err != nil { + return 0, err + } + l = len(w.buf) + } + // too big to fit in buffer; + // write directly to w.w + // + // yes, this is unsafe. *but* + // io.Writer is not allowed + // to mutate its input or + // maintain a reference to it, + // per the spec in package io. + // + // plus, if the string is really + // too big to fit in the buffer, then + // creating a copy to write it is + // expensive (and, strictly speaking, + // unnecessary) + if c < ln { + return w.w.Write(unsafestr(s)) + } + + // grow buf slice; copy; return + w.buf = w.buf[:l+ln] + return copy(w.buf[l:], s), nil +} + +// WriteByte implements `io.ByteWriter` +func (w *Writer) WriteByte(b byte) error { + if len(w.buf) == cap(w.buf) { + if err := w.Flush(); err != nil { + return err + } + } + w.buf = append(w.buf, b) + return nil +} + +// Next returns the next 'n' free bytes +// in the write buffer, flushing the writer +// as necessary. Next will return `io.ErrShortBuffer` +// if 'n' is greater than the size of the write buffer. +// Calls to 'next' increment the write position by +// the size of the returned buffer. +func (w *Writer) Next(n int) ([]byte, error) { + c, l := cap(w.buf), len(w.buf) + if n > c { + return nil, io.ErrShortBuffer + } + avail := c - l + if avail < n { + if err := w.Flush(); err != nil { + return nil, err + } + l = len(w.buf) + } + w.buf = w.buf[:l+n] + return w.buf[l:], nil +} + +// take the bytes from w.buf[n:len(w.buf)] +// and put them at the beginning of w.buf, +// and resize to the length of the copied segment. +func (w *Writer) pushback(n int) { + w.buf = w.buf[:copy(w.buf, w.buf[n:])] +} + +// ReadFrom implements `io.ReaderFrom` +func (w *Writer) ReadFrom(r io.Reader) (int64, error) { + // anticipatory flush + if err := w.Flush(); err != nil { + return 0, err + } + + w.buf = w.buf[0:cap(w.buf)] // expand buffer + + var nn int64 // written + var err error // error + var x int // read + + // 1:1 reads and writes + for err == nil { + x, err = r.Read(w.buf) + if x > 0 { + n, werr := w.w.Write(w.buf[:x]) + nn += int64(n) + + if err != nil { + if n < x && n > 0 { + w.pushback(n - x) + } + return nn, werr + } + if n < x { + w.pushback(n - x) + return nn, io.ErrShortWrite + } + } else if err == nil { + err = io.ErrNoProgress + break + } + } + if err != io.EOF { + return nn, err + } + + // we only clear here + // because we are sure + // the writes have + // succeeded. otherwise, + // we retain the data in case + // future writes succeed. 
+ w.buf = w.buf[0:0] + + return nn, nil +} diff --git a/vendor/github.com/philhofer/fwd/writer_appengine.go b/vendor/github.com/philhofer/fwd/writer_appengine.go new file mode 100644 index 0000000000..e367f39317 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_appengine.go @@ -0,0 +1,5 @@ +// +build appengine + +package fwd + +func unsafestr(s string) []byte { return []byte(s) } diff --git a/vendor/github.com/philhofer/fwd/writer_unsafe.go b/vendor/github.com/philhofer/fwd/writer_unsafe.go new file mode 100644 index 0000000000..a0bf453b39 --- /dev/null +++ b/vendor/github.com/philhofer/fwd/writer_unsafe.go @@ -0,0 +1,18 @@ +// +build !appengine + +package fwd + +import ( + "reflect" + "unsafe" +) + +// unsafe cast string as []byte +func unsafestr(b string) []byte { + l := len(b) + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: l, + Cap: l, + Data: (*reflect.StringHeader)(unsafe.Pointer(&b)).Data, + })) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/LICENSE b/vendor/github.com/tinylib/msgp/msgp/LICENSE new file mode 100644 index 0000000000..14d60424e8 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2014 Philip Hofer +Portions Copyright (c) 2009 The Go Authors (license at http://golang.org) where indicated + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_linux.go b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go new file mode 100644 index 0000000000..6c6bb37a5f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_linux.go @@ -0,0 +1,24 @@ +// +build linux,!appengine + +package msgp + +import ( + "os" + "syscall" +) + +func adviseRead(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL|syscall.MADV_WILLNEED) +} + +func adviseWrite(mem []byte) { + syscall.Madvise(mem, syscall.MADV_SEQUENTIAL) +} + +func fallocate(f *os.File, sz int64) error { + err := syscall.Fallocate(int(f.Fd()), 0, 0, sz) + if err == syscall.ENOTSUP { + return f.Truncate(sz) + } + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/advise_other.go b/vendor/github.com/tinylib/msgp/msgp/advise_other.go new file mode 100644 index 0000000000..da65ea5412 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/advise_other.go @@ -0,0 +1,17 @@ +// +build !linux appengine + +package msgp + +import ( + "os" +) + +// TODO: darwin, BSD support + +func adviseRead(mem []byte) {} + +func adviseWrite(mem []byte) {} + +func fallocate(f *os.File, sz int64) error { + return f.Truncate(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/appengine.go b/vendor/github.com/tinylib/msgp/msgp/appengine.go new file mode 100644 index 0000000000..bff9e768ab --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/appengine.go @@ -0,0 +1,15 @@ +// +build appengine + +package msgp + +// let's just assume appengine +// uses 64-bit hardware... +const smallint = false + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/circular.go b/vendor/github.com/tinylib/msgp/msgp/circular.go new file mode 100644 index 0000000000..a0434c7ea1 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/circular.go @@ -0,0 +1,39 @@ +package msgp + +type timer interface { + StartTimer() + StopTimer() +} + +// EndlessReader is an io.Reader +// that loops over the same data +// endlessly. It is used for benchmarking. +type EndlessReader struct { + tb timer + data []byte + offset int +} + +// NewEndlessReader returns a new endless reader +func NewEndlessReader(b []byte, tb timer) *EndlessReader { + return &EndlessReader{tb: tb, data: b, offset: 0} +} + +// Read implements io.Reader. In practice, it +// always returns (len(p), nil), although it +// fills the supplied slice while the benchmark +// timer is stopped. +func (c *EndlessReader) Read(p []byte) (int, error) { + c.tb.StopTimer() + var n int + l := len(p) + m := len(c.data) + for n < l { + nn := copy(p[n:], c.data[c.offset:]) + n += nn + c.offset += nn + c.offset %= m + } + c.tb.StartTimer() + return n, nil +} diff --git a/vendor/github.com/tinylib/msgp/msgp/defs.go b/vendor/github.com/tinylib/msgp/msgp/defs.go new file mode 100644 index 0000000000..c634eef1df --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/defs.go @@ -0,0 +1,142 @@ +// This package is the support library for the msgp code generator (http://github.com/tinylib/msgp). +// +// This package defines the utilites used by the msgp code generator for encoding and decoding MessagePack +// from []byte and io.Reader/io.Writer types. Much of this package is devoted to helping the msgp code +// generator implement the Marshaler/Unmarshaler and Encodable/Decodable interfaces. 
+// +// This package defines four "families" of functions: +// - AppendXxxx() appends an object to a []byte in MessagePack encoding. +// - ReadXxxxBytes() reads an object from a []byte and returns the remaining bytes. +// - (*Writer).WriteXxxx() writes an object to the buffered *Writer type. +// - (*Reader).ReadXxxx() reads an object from a buffered *Reader type. +// +// Once a type has satisfied the `Encodable` and `Decodable` interfaces, +// it can be written and read from arbitrary `io.Writer`s and `io.Reader`s using +// msgp.Encode(io.Writer, msgp.Encodable) +// and +// msgp.Decode(io.Reader, msgp.Decodable) +// +// There are also methods for converting MessagePack to JSON without +// an explicit de-serialization step. +// +// For additional tips, tricks, and gotchas, please visit +// the wiki at http://github.com/tinylib/msgp +package msgp + +const last4 = 0x0f +const first4 = 0xf0 +const last5 = 0x1f +const first3 = 0xe0 +const last7 = 0x7f + +func isfixint(b byte) bool { + return b>>7 == 0 +} + +func isnfixint(b byte) bool { + return b&first3 == mnfixint +} + +func isfixmap(b byte) bool { + return b&first4 == mfixmap +} + +func isfixarray(b byte) bool { + return b&first4 == mfixarray +} + +func isfixstr(b byte) bool { + return b&first3 == mfixstr +} + +func wfixint(u uint8) byte { + return u & last7 +} + +func rfixint(b byte) uint8 { + return b +} + +func wnfixint(i int8) byte { + return byte(i) | mnfixint +} + +func rnfixint(b byte) int8 { + return int8(b) +} + +func rfixmap(b byte) uint8 { + return b & last4 +} + +func wfixmap(u uint8) byte { + return mfixmap | (u & last4) +} + +func rfixstr(b byte) uint8 { + return b & last5 +} + +func wfixstr(u uint8) byte { + return (u & last5) | mfixstr +} + +func rfixarray(b byte) uint8 { + return (b & last4) +} + +func wfixarray(u uint8) byte { + return (u & last4) | mfixarray +} + +// These are all the byte +// prefixes defined by the +// msgpack standard +const ( + // 0XXXXXXX + mfixint uint8 = 0x00 + + // 111XXXXX + mnfixint uint8 = 0xe0 + + // 1000XXXX + mfixmap uint8 = 0x80 + + // 1001XXXX + mfixarray uint8 = 0x90 + + // 101XXXXX + mfixstr uint8 = 0xa0 + + mnil uint8 = 0xc0 + mfalse uint8 = 0xc2 + mtrue uint8 = 0xc3 + mbin8 uint8 = 0xc4 + mbin16 uint8 = 0xc5 + mbin32 uint8 = 0xc6 + mext8 uint8 = 0xc7 + mext16 uint8 = 0xc8 + mext32 uint8 = 0xc9 + mfloat32 uint8 = 0xca + mfloat64 uint8 = 0xcb + muint8 uint8 = 0xcc + muint16 uint8 = 0xcd + muint32 uint8 = 0xce + muint64 uint8 = 0xcf + mint8 uint8 = 0xd0 + mint16 uint8 = 0xd1 + mint32 uint8 = 0xd2 + mint64 uint8 = 0xd3 + mfixext1 uint8 = 0xd4 + mfixext2 uint8 = 0xd5 + mfixext4 uint8 = 0xd6 + mfixext8 uint8 = 0xd7 + mfixext16 uint8 = 0xd8 + mstr8 uint8 = 0xd9 + mstr16 uint8 = 0xda + mstr32 uint8 = 0xdb + marray16 uint8 = 0xdc + marray32 uint8 = 0xdd + mmap16 uint8 = 0xde + mmap32 uint8 = 0xdf +) diff --git a/vendor/github.com/tinylib/msgp/msgp/edit.go b/vendor/github.com/tinylib/msgp/msgp/edit.go new file mode 100644 index 0000000000..41f9298646 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/edit.go @@ -0,0 +1,241 @@ +package msgp + +import ( + "math" +) + +// Locate returns a []byte pointing to the field +// in a messagepack map with the provided key. (The returned []byte +// points to a sub-slice of 'raw'; Locate does no allocations.) If the +// key doesn't exist in the map, a zero-length []byte will be returned. 
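+//
+// A minimal sketch of typical use (the map contents below are an
+// illustrative assumption):
+//
+//	var raw []byte
+//	raw = AppendMapHeader(raw, 1)
+//	raw = AppendString(raw, "name")
+//	raw = AppendString(raw, "gopher")
+//	val := Locate("name", raw) // val points into raw at the encoded "gopher"
+//	// Locate("missing", raw) returns a zero-length slice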
+func Locate(key string, raw []byte) []byte { + s, n := locate(raw, key) + return raw[s:n] +} + +// Replace takes a key ("key") in a messagepack map ("raw") +// and replaces its value with the one provided and returns +// the new []byte. The returned []byte may point to the same +// memory as "raw". Replace makes no effort to evaluate the validity +// of the contents of 'val'. It may use up to the full capacity of 'raw.' +// Replace returns 'nil' if the field doesn't exist or if the object in 'raw' +// is not a map. +func Replace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, true) +} + +// CopyReplace works similarly to Replace except that the returned +// byte slice does not point to the same memory as 'raw'. CopyReplace +// returns 'nil' if the field doesn't exist or 'raw' isn't a map. +func CopyReplace(key string, raw []byte, val []byte) []byte { + start, end := locate(raw, key) + if start == end { + return nil + } + return replace(raw, start, end, val, false) +} + +// Remove removes a key-value pair from 'raw'. It returns +// 'raw' unchanged if the key didn't exist. +func Remove(key string, raw []byte) []byte { + start, end := locateKV(raw, key) + if start == end { + return raw + } + raw = raw[:start+copy(raw[start:], raw[end:])] + return resizeMap(raw, -1) +} + +// HasKey returns whether the map in 'raw' has +// a field with key 'key' +func HasKey(key string, raw []byte) bool { + sz, bts, err := ReadMapHeaderBytes(raw) + if err != nil { + return false + } + var field []byte + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return false + } + if UnsafeString(field) == key { + return true + } + } + return false +} + +func replace(raw []byte, start int, end int, val []byte, inplace bool) []byte { + ll := end - start // length of segment to replace + lv := len(val) + + if inplace { + extra := lv - ll + + // fastest case: we're doing + // a 1:1 replacement + if extra == 0 { + copy(raw[start:], val) + return raw + + } else if extra < 0 { + // 'val' smaller than replaced value + // copy in place and shift back + + x := copy(raw[start:], val) + y := copy(raw[start+x:], raw[end:]) + return raw[:start+x+y] + + } else if extra < cap(raw)-len(raw) { + // 'val' less than (cap-len) extra bytes + // copy in place and shift forward + raw = raw[0 : len(raw)+extra] + // shift end forward + copy(raw[end+extra:], raw[end:]) + copy(raw[start:], val) + return raw + } + } + + // we have to allocate new space + out := make([]byte, len(raw)+len(val)-ll) + x := copy(out, raw[:start]) + y := copy(out[x:], val) + copy(out[x+y:], raw[end:]) + return out +} + +// locate does a naive O(n) search for the map key; returns start, end +// (returns 0,0 on error) +func locate(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte + field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return + } + + // loop and locate field + for i := uint32(0); i < sz; i++ { + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + // start location + l := len(raw) + start = l - len(bts) + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = l - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// locate key AND value +func locateKV(raw []byte, key string) (start int, end int) { + var ( + sz uint32 + bts []byte 
+ field []byte + err error + ) + sz, bts, err = ReadMapHeaderBytes(raw) + if err != nil { + return 0, 0 + } + + for i := uint32(0); i < sz; i++ { + tmp := len(bts) + field, bts, err = ReadStringZC(bts) + if err != nil { + return 0, 0 + } + if UnsafeString(field) == key { + start = len(raw) - tmp + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + end = len(raw) - len(bts) + return + } + bts, err = Skip(bts) + if err != nil { + return 0, 0 + } + } + return 0, 0 +} + +// delta is delta on map size +func resizeMap(raw []byte, delta int64) []byte { + var sz int64 + switch raw[0] { + case mmap16: + sz = int64(big.Uint16(raw[1:])) + if sz+delta <= math.MaxUint16 { + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[5:], raw[3:]) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[3:]...) + + case mmap32: + sz = int64(big.Uint32(raw[1:])) + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + + default: + sz = int64(rfixmap(raw[0])) + if sz+delta < 16 { + raw[0] = wfixmap(uint8(sz + delta)) + return raw + } else if sz+delta <= math.MaxUint16 { + if cap(raw)-len(raw) >= 2 { + raw = raw[0 : len(raw)+2] + copy(raw[3:], raw[1:]) + raw[0] = mmap16 + big.PutUint16(raw[1:], uint16(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } + if cap(raw)-len(raw) >= 4 { + raw = raw[0 : len(raw)+4] + copy(raw[5:], raw[1:]) + raw[0] = mmap32 + big.PutUint32(raw[1:], uint32(sz+delta)) + return raw + } + n := make([]byte, 0, len(raw)+5) + n = AppendMapHeader(n, uint32(sz+delta)) + return append(n, raw[1:]...) + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/elsize.go b/vendor/github.com/tinylib/msgp/msgp/elsize.go new file mode 100644 index 0000000000..95762e7eeb --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/elsize.go @@ -0,0 +1,99 @@ +package msgp + +// size of every object on the wire, +// plus type information. gives us +// constant-time type information +// for traversing composite objects. 
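+//
+// For example, sizes[mstr8] records a 2-byte prefix (marker plus one length
+// byte) with extra8 trailing bytes, so a str8 object's total size can be
+// determined from its first two bytes alone.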
+// +var sizes = [256]bytespec{ + mnil: {size: 1, extra: constsize, typ: NilType}, + mfalse: {size: 1, extra: constsize, typ: BoolType}, + mtrue: {size: 1, extra: constsize, typ: BoolType}, + mbin8: {size: 2, extra: extra8, typ: BinType}, + mbin16: {size: 3, extra: extra16, typ: BinType}, + mbin32: {size: 5, extra: extra32, typ: BinType}, + mext8: {size: 3, extra: extra8, typ: ExtensionType}, + mext16: {size: 4, extra: extra16, typ: ExtensionType}, + mext32: {size: 6, extra: extra32, typ: ExtensionType}, + mfloat32: {size: 5, extra: constsize, typ: Float32Type}, + mfloat64: {size: 9, extra: constsize, typ: Float64Type}, + muint8: {size: 2, extra: constsize, typ: UintType}, + muint16: {size: 3, extra: constsize, typ: UintType}, + muint32: {size: 5, extra: constsize, typ: UintType}, + muint64: {size: 9, extra: constsize, typ: UintType}, + mint8: {size: 2, extra: constsize, typ: IntType}, + mint16: {size: 3, extra: constsize, typ: IntType}, + mint32: {size: 5, extra: constsize, typ: IntType}, + mint64: {size: 9, extra: constsize, typ: IntType}, + mfixext1: {size: 3, extra: constsize, typ: ExtensionType}, + mfixext2: {size: 4, extra: constsize, typ: ExtensionType}, + mfixext4: {size: 6, extra: constsize, typ: ExtensionType}, + mfixext8: {size: 10, extra: constsize, typ: ExtensionType}, + mfixext16: {size: 18, extra: constsize, typ: ExtensionType}, + mstr8: {size: 2, extra: extra8, typ: StrType}, + mstr16: {size: 3, extra: extra16, typ: StrType}, + mstr32: {size: 5, extra: extra32, typ: StrType}, + marray16: {size: 3, extra: array16v, typ: ArrayType}, + marray32: {size: 5, extra: array32v, typ: ArrayType}, + mmap16: {size: 3, extra: map16v, typ: MapType}, + mmap32: {size: 5, extra: map32v, typ: MapType}, +} + +func init() { + // set up fixed fields + + // fixint + for i := mfixint; i < 0x80; i++ { + sizes[i] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // nfixint + for i := uint16(mnfixint); i < 0x100; i++ { + sizes[uint8(i)] = bytespec{size: 1, extra: constsize, typ: IntType} + } + + // fixstr gets constsize, + // since the prefix yields the size + for i := mfixstr; i < 0xc0; i++ { + sizes[i] = bytespec{size: 1 + rfixstr(i), extra: constsize, typ: StrType} + } + + // fixmap + for i := mfixmap; i < 0x90; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(2 * rfixmap(i)), typ: MapType} + } + + // fixarray + for i := mfixarray; i < 0xa0; i++ { + sizes[i] = bytespec{size: 1, extra: varmode(rfixarray(i)), typ: ArrayType} + } +} + +// a valid bytespsec has +// non-zero 'size' and +// non-zero 'typ' +type bytespec struct { + size uint8 // prefix size information + extra varmode // extra size information + typ Type // type + _ byte // makes bytespec 4 bytes (yes, this matters) +} + +// size mode +// if positive, # elements for composites +type varmode int8 + +const ( + constsize varmode = 0 // constant size (size bytes + uint8(varmode) objects) + extra8 = -1 // has uint8(p[1]) extra bytes + extra16 = -2 // has be16(p[1:]) extra bytes + extra32 = -3 // has be32(p[1:]) extra bytes + map16v = -4 // use map16 + map32v = -5 // use map32 + array16v = -6 // use array16 + array32v = -7 // use array32 +) + +func getType(v byte) Type { + return sizes[v].typ +} diff --git a/vendor/github.com/tinylib/msgp/msgp/errors.go b/vendor/github.com/tinylib/msgp/msgp/errors.go new file mode 100644 index 0000000000..5c24f27103 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/errors.go @@ -0,0 +1,142 @@ +package msgp + +import ( + "fmt" + "reflect" +) + +var ( + // ErrShortBytes is returned when the + 
// slice being decoded is too short to + // contain the contents of the message + ErrShortBytes error = errShort{} + + // this error is only returned + // if we reach code that should + // be unreachable + fatal error = errFatal{} +) + +// Error is the interface satisfied +// by all of the errors that originate +// from this package. +type Error interface { + error + + // Resumable returns whether + // or not the error means that + // the stream of data is malformed + // and the information is unrecoverable. + Resumable() bool +} + +type errShort struct{} + +func (e errShort) Error() string { return "msgp: too few bytes left to read object" } +func (e errShort) Resumable() bool { return false } + +type errFatal struct{} + +func (f errFatal) Error() string { return "msgp: fatal decoding error (unreachable code)" } +func (f errFatal) Resumable() bool { return false } + +// ArrayError is an error returned +// when decoding a fix-sized array +// of the wrong size +type ArrayError struct { + Wanted uint32 + Got uint32 +} + +// Error implements the error interface +func (a ArrayError) Error() string { + return fmt.Sprintf("msgp: wanted array of size %d; got %d", a.Wanted, a.Got) +} + +// Resumable is always 'true' for ArrayErrors +func (a ArrayError) Resumable() bool { return true } + +// IntOverflow is returned when a call +// would downcast an integer to a type +// with too few bits to hold its value. +type IntOverflow struct { + Value int64 // the value of the integer + FailedBitsize int // the bit size that the int64 could not fit into +} + +// Error implements the error interface +func (i IntOverflow) Error() string { + return fmt.Sprintf("msgp: %d overflows int%d", i.Value, i.FailedBitsize) +} + +// Resumable is always 'true' for overflows +func (i IntOverflow) Resumable() bool { return true } + +// UintOverflow is returned when a call +// would downcast an unsigned integer to a type +// with too few bits to hold its value +type UintOverflow struct { + Value uint64 // value of the uint + FailedBitsize int // the bit size that couldn't fit the value +} + +// Error implements the error interface +func (u UintOverflow) Error() string { + return fmt.Sprintf("msgp: %d overflows uint%d", u.Value, u.FailedBitsize) +} + +// Resumable is always 'true' for overflows +func (u UintOverflow) Resumable() bool { return true } + +// A TypeError is returned when a particular +// decoding method is unsuitable for decoding +// a particular MessagePack value. +type TypeError struct { + Method Type // Type expected by method + Encoded Type // Type actually encoded +} + +// Error implements the error interface +func (t TypeError) Error() string { + return fmt.Sprintf("msgp: attempted to decode type %q with method for %q", t.Encoded, t.Method) +} + +// Resumable returns 'true' for TypeErrors +func (t TypeError) Resumable() bool { return true } + +// returns either InvalidPrefixError or +// TypeError depending on whether or not +// the prefix is recognized +func badPrefix(want Type, lead byte) error { + t := sizes[lead].typ + if t == InvalidType { + return InvalidPrefixError(lead) + } + return TypeError{Method: want, Encoded: t} +} + +// InvalidPrefixError is returned when a bad encoding +// uses a prefix that is not recognized in the MessagePack standard. +// This kind of error is unrecoverable. 
+type InvalidPrefixError byte + +// Error implements the error interface +func (i InvalidPrefixError) Error() string { + return fmt.Sprintf("msgp: unrecognized type prefix 0x%x", byte(i)) +} + +// Resumable returns 'false' for InvalidPrefixErrors +func (i InvalidPrefixError) Resumable() bool { return false } + +// ErrUnsupportedType is returned +// when a bad argument is supplied +// to a function that takes `interface{}`. +type ErrUnsupportedType struct { + T reflect.Type +} + +// Error implements error +func (e *ErrUnsupportedType) Error() string { return fmt.Sprintf("msgp: type %q not supported", e.T) } + +// Resumable returns 'true' for ErrUnsupportedType +func (e *ErrUnsupportedType) Resumable() bool { return true } diff --git a/vendor/github.com/tinylib/msgp/msgp/extension.go b/vendor/github.com/tinylib/msgp/msgp/extension.go new file mode 100644 index 0000000000..588b18f95b --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/extension.go @@ -0,0 +1,548 @@ +package msgp + +import ( + "fmt" + "math" +) + +const ( + // Complex64Extension is the extension number used for complex64 + Complex64Extension = 3 + + // Complex128Extension is the extension number used for complex128 + Complex128Extension = 4 + + // TimeExtension is the extension number used for time.Time + TimeExtension = 5 +) + +// our extensions live here +var extensionReg = make(map[int8]func() Extension) + +// RegisterExtension registers extensions so that they +// can be initialized and returned by methods that +// decode `interface{}` values. This should only +// be called during initialization. f() should return +// a newly-initialized zero value of the extension. Keep in +// mind that extensions 3, 4, and 5 are reserved for +// complex64, complex128, and time.Time, respectively, +// and that MessagePack reserves extension types from -127 to -1. +// +// For example, if you wanted to register a user-defined struct: +// +// msgp.RegisterExtension(10, func() msgp.Extension { &MyExtension{} }) +// +// RegisterExtension will panic if you call it multiple times +// with the same 'typ' argument, or if you use a reserved +// type (3, 4, or 5). +func RegisterExtension(typ int8, f func() Extension) { + switch typ { + case Complex64Extension, Complex128Extension, TimeExtension: + panic(fmt.Sprint("msgp: forbidden extension type:", typ)) + } + if _, ok := extensionReg[typ]; ok { + panic(fmt.Sprint("msgp: RegisterExtension() called with typ", typ, "more than once")) + } + extensionReg[typ] = f +} + +// ExtensionTypeError is an error type returned +// when there is a mis-match between an extension type +// and the type encoded on the wire +type ExtensionTypeError struct { + Got int8 + Want int8 +} + +// Error implements the error interface +func (e ExtensionTypeError) Error() string { + return fmt.Sprintf("msgp: error decoding extension: wanted type %d; got type %d", e.Want, e.Got) +} + +// Resumable returns 'true' for ExtensionTypeErrors +func (e ExtensionTypeError) Resumable() bool { return true } + +func errExt(got int8, wanted int8) error { + return ExtensionTypeError{Got: got, Want: wanted} +} + +// Extension is the interface fulfilled +// by types that want to define their +// own binary encoding. +type Extension interface { + // ExtensionType should return + // a int8 that identifies the concrete + // type of the extension. (Types <0 are + // officially reserved by the MessagePack + // specifications.) 
+ ExtensionType() int8 + + // Len should return the length + // of the data to be encoded + Len() int + + // MarshalBinaryTo should copy + // the data into the supplied slice, + // assuming that the slice has length Len() + MarshalBinaryTo([]byte) error + + UnmarshalBinary([]byte) error +} + +// RawExtension implements the Extension interface +type RawExtension struct { + Data []byte + Type int8 +} + +// ExtensionType implements Extension.ExtensionType, and returns r.Type +func (r *RawExtension) ExtensionType() int8 { return r.Type } + +// Len implements Extension.Len, and returns len(r.Data) +func (r *RawExtension) Len() int { return len(r.Data) } + +// MarshalBinaryTo implements Extension.MarshalBinaryTo, +// and returns a copy of r.Data +func (r *RawExtension) MarshalBinaryTo(d []byte) error { + copy(d, r.Data) + return nil +} + +// UnmarshalBinary implements Extension.UnmarshalBinary, +// and sets r.Data to the contents of the provided slice +func (r *RawExtension) UnmarshalBinary(b []byte) error { + if cap(r.Data) >= len(b) { + r.Data = r.Data[0:len(b)] + } else { + r.Data = make([]byte, len(b)) + } + copy(r.Data, b) + return nil +} + +// WriteExtension writes an extension type to the writer +func (mw *Writer) WriteExtension(e Extension) error { + l := e.Len() + var err error + switch l { + case 0: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 0 + mw.buf[o+2] = byte(e.ExtensionType()) + case 1: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext1 + mw.buf[o+1] = byte(e.ExtensionType()) + case 2: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext2 + mw.buf[o+1] = byte(e.ExtensionType()) + case 4: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext4 + mw.buf[o+1] = byte(e.ExtensionType()) + case 8: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = byte(e.ExtensionType()) + case 16: + o, err := mw.require(2) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = byte(e.ExtensionType()) + default: + switch { + case l < math.MaxUint8: + o, err := mw.require(3) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = byte(uint8(l)) + mw.buf[o+2] = byte(e.ExtensionType()) + case l < math.MaxUint16: + o, err := mw.require(4) + if err != nil { + return err + } + mw.buf[o] = mext16 + big.PutUint16(mw.buf[o+1:], uint16(l)) + mw.buf[o+3] = byte(e.ExtensionType()) + default: + o, err := mw.require(6) + if err != nil { + return err + } + mw.buf[o] = mext32 + big.PutUint32(mw.buf[o+1:], uint32(l)) + mw.buf[o+5] = byte(e.ExtensionType()) + } + } + // we can only write directly to the + // buffer if we're sure that it + // fits the object + if l <= mw.bufsize() { + o, err := mw.require(l) + if err != nil { + return err + } + return e.MarshalBinaryTo(mw.buf[o:]) + } + // here we create a new buffer + // just large enough for the body + // and save it as the write buffer + err = mw.flush() + if err != nil { + return err + } + buf := make([]byte, l) + err = e.MarshalBinaryTo(buf) + if err != nil { + return err + } + mw.buf = buf + mw.wloc = l + return nil +} + +// peek at the extension type, assuming the next +// kind to be read is Extension +func (m *Reader) peekExtensionType() (int8, error) { + p, err := m.R.Peek(2) + if err != nil { + return 0, err + } + spec := sizes[p[0]] + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, p[0]) + } + if spec.extra == 
constsize { + return int8(p[1]), nil + } + size := spec.size + p, err = m.R.Peek(int(size)) + if err != nil { + return 0, err + } + return int8(p[size-1]), nil +} + +// peekExtension peeks at the extension encoding type +// (must guarantee at least 1 byte in 'b') +func peekExtension(b []byte) (int8, error) { + spec := sizes[b[0]] + size := spec.size + if spec.typ != ExtensionType { + return 0, badPrefix(ExtensionType, b[0]) + } + if len(b) < int(size) { + return 0, ErrShortBytes + } + // for fixed extensions, + // the type information is in + // the second byte + if spec.extra == constsize { + return int8(b[1]), nil + } + // otherwise, it's in the last + // part of the prefix + return int8(b[size-1]), nil +} + +// ReadExtension reads the next object from the reader +// as an extension. ReadExtension will fail if the next +// object in the stream is not an extension, or if +// e.Type() is not the same as the wire type. +func (m *Reader) ReadExtension(e Extension) (err error) { + var p []byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead := p[0] + var read int + var off int + switch lead { + case mfixext1: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(3) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(3) + } + return + + case mfixext2: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(4) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(4) + } + return + + case mfixext4: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(6) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(6) + } + return + + case mfixext8: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(10) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(10) + } + return + + case mfixext16: + if int8(p[1]) != e.ExtensionType() { + err = errExt(int8(p[1]), e.ExtensionType()) + return + } + p, err = m.R.Peek(18) + if err != nil { + return + } + err = e.UnmarshalBinary(p[2:]) + if err == nil { + _, err = m.R.Skip(18) + } + return + + case mext8: + p, err = m.R.Peek(3) + if err != nil { + return + } + if int8(p[2]) != e.ExtensionType() { + err = errExt(int8(p[2]), e.ExtensionType()) + return + } + read = int(uint8(p[1])) + off = 3 + + case mext16: + p, err = m.R.Peek(4) + if err != nil { + return + } + if int8(p[3]) != e.ExtensionType() { + err = errExt(int8(p[3]), e.ExtensionType()) + return + } + read = int(big.Uint16(p[1:])) + off = 4 + + case mext32: + p, err = m.R.Peek(6) + if err != nil { + return + } + if int8(p[5]) != e.ExtensionType() { + err = errExt(int8(p[5]), e.ExtensionType()) + return + } + read = int(big.Uint32(p[1:])) + off = 6 + + default: + err = badPrefix(ExtensionType, lead) + return + } + + p, err = m.R.Peek(read + off) + if err != nil { + return + } + err = e.UnmarshalBinary(p[off:]) + if err == nil { + _, err = m.R.Skip(read + off) + } + return +} + +// AppendExtension appends a MessagePack extension to the provided slice +func AppendExtension(b []byte, e Extension) ([]byte, error) { + l := e.Len() + var o []byte + var n int + switch l { + case 0: + o, n = ensure(b, 3) + o[n] = mext8 + o[n+1] = 0 + o[n+2] = 
byte(e.ExtensionType()) + return o[:n+3], nil + case 1: + o, n = ensure(b, 3) + o[n] = mfixext1 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 2: + o, n = ensure(b, 4) + o[n] = mfixext2 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 4: + o, n = ensure(b, 6) + o[n] = mfixext4 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 8: + o, n = ensure(b, 10) + o[n] = mfixext8 + o[n+1] = byte(e.ExtensionType()) + n += 2 + case 16: + o, n = ensure(b, 18) + o[n] = mfixext16 + o[n+1] = byte(e.ExtensionType()) + n += 2 + } + switch { + case l < math.MaxUint8: + o, n = ensure(b, l+3) + o[n] = mext8 + o[n+1] = byte(uint8(l)) + o[n+2] = byte(e.ExtensionType()) + n += 3 + case l < math.MaxUint16: + o, n = ensure(b, l+4) + o[n] = mext16 + big.PutUint16(o[n+1:], uint16(l)) + o[n+3] = byte(e.ExtensionType()) + n += 4 + default: + o, n = ensure(b, l+6) + o[n] = mext32 + big.PutUint32(o[n+1:], uint32(l)) + o[n+5] = byte(e.ExtensionType()) + n += 6 + } + return o, e.MarshalBinaryTo(o[n:]) +} + +// ReadExtensionBytes reads an extension from 'b' into 'e' +// and returns any remaining bytes. +// Possible errors: +// - ErrShortBytes ('b' not long enough) +// - ExtensionTypeErorr{} (wire type not the same as e.Type()) +// - TypeErorr{} (next object not an extension) +// - InvalidPrefixError +// - An umarshal error returned from e.UnmarshalBinary +func ReadExtensionBytes(b []byte, e Extension) ([]byte, error) { + l := len(b) + if l < 3 { + return b, ErrShortBytes + } + lead := b[0] + var ( + sz int // size of 'data' + off int // offset of 'data' + typ int8 + ) + switch lead { + case mfixext1: + typ = int8(b[1]) + sz = 1 + off = 2 + case mfixext2: + typ = int8(b[1]) + sz = 2 + off = 2 + case mfixext4: + typ = int8(b[1]) + sz = 4 + off = 2 + case mfixext8: + typ = int8(b[1]) + sz = 8 + off = 2 + case mfixext16: + typ = int8(b[1]) + sz = 16 + off = 2 + case mext8: + sz = int(uint8(b[1])) + typ = int8(b[2]) + off = 3 + if sz == 0 { + return b[3:], e.UnmarshalBinary(b[3:3]) + } + case mext16: + if l < 4 { + return b, ErrShortBytes + } + sz = int(big.Uint16(b[1:])) + typ = int8(b[3]) + off = 4 + case mext32: + if l < 6 { + return b, ErrShortBytes + } + sz = int(big.Uint32(b[1:])) + typ = int8(b[5]) + off = 6 + default: + return b, badPrefix(ExtensionType, lead) + } + + if typ != e.ExtensionType() { + return b, errExt(typ, e.ExtensionType()) + } + + // the data of the extension starts + // at 'off' and is 'sz' bytes long + if len(b[off:]) < sz { + return b, ErrShortBytes + } + tot := off + sz + return b[tot:], e.UnmarshalBinary(b[off:tot]) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file.go b/vendor/github.com/tinylib/msgp/msgp/file.go new file mode 100644 index 0000000000..cbbf56cd59 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file.go @@ -0,0 +1,91 @@ +// +build linux,!appengine darwin dragonfly freebsd netbsd openbsd + +package msgp + +import ( + "os" + "syscall" +) + +// ReadFile reads a file into 'dst' using +// a read-only memory mapping. Consequently, +// the file must be mmap-able, and the +// Unmarshaler should never write to +// the source memory. (Methods generated +// by the msgp tool obey that constraint, but +// user-defined implementations may not.) +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. 
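+//
+// A minimal usage sketch (the file name and the Thing type are illustrative
+// assumptions; Thing is presumed to have msgp-generated methods):
+//
+//	f, err := os.Open("objects.msgp")
+//	if err != nil {
+//		// handle err
+//	}
+//	var t Thing // t implements msgp.Unmarshaler
+//	err = msgp.ReadFile(&t, f)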
+// +func ReadFile(dst Unmarshaler, file *os.File) error { + stat, err := file.Stat() + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, int(stat.Size()), syscall.PROT_READ, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseRead(data) + _, err = dst.UnmarshalMsg(data) + uerr := syscall.Munmap(data) + if err == nil { + err = uerr + } + return err +} + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +// WriteFile writes a file from 'src' using +// memory mapping. It overwrites the entire +// contents of the previous file. +// The mapping size is calculated +// using the `Msgsize()` method +// of 'src', so it must produce a result +// equal to or greater than the actual encoded +// size of the object. Otherwise, +// a fault (SIGBUS) will occur. +// +// Reading and writing through file mappings +// is only efficient for large files; small +// files are best read and written using +// the ordinary streaming interfaces. +// +// NOTE: The performance of this call +// is highly OS- and filesystem-dependent. +// Users should take care to test that this +// performs as expected in a production environment. +// (Linux users should run a kernel and filesystem +// that support fallocate(2) for the best results.) +func WriteFile(src MarshalSizer, file *os.File) error { + sz := src.Msgsize() + err := fallocate(file, int64(sz)) + if err != nil { + return err + } + data, err := syscall.Mmap(int(file.Fd()), 0, sz, syscall.PROT_READ|syscall.PROT_WRITE, syscall.MAP_SHARED) + if err != nil { + return err + } + adviseWrite(data) + chunk := data[:0] + chunk, err = src.MarshalMsg(chunk) + if err != nil { + return err + } + uerr := syscall.Munmap(data) + if uerr != nil { + return uerr + } + return file.Truncate(int64(len(chunk))) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/file_port.go b/vendor/github.com/tinylib/msgp/msgp/file_port.go new file mode 100644 index 0000000000..6e654dbdc2 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/file_port.go @@ -0,0 +1,47 @@ +// +build windows appengine + +package msgp + +import ( + "io/ioutil" + "os" +) + +// MarshalSizer is the combination +// of the Marshaler and Sizer +// interfaces. +type MarshalSizer interface { + Marshaler + Sizer +} + +func ReadFile(dst Unmarshaler, file *os.File) error { + if u, ok := dst.(Decodable); ok { + return u.DecodeMsg(NewReader(file)) + } + + data, err := ioutil.ReadAll(file) + if err != nil { + return err + } + _, err = dst.UnmarshalMsg(data) + return err +} + +func WriteFile(src MarshalSizer, file *os.File) error { + if e, ok := src.(Encodable); ok { + w := NewWriter(file) + err := e.EncodeMsg(w) + if err == nil { + err = w.Flush() + } + return err + } + + raw, err := src.MarshalMsg(nil) + if err != nil { + return err + } + _, err = file.Write(raw) + return err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/integers.go b/vendor/github.com/tinylib/msgp/msgp/integers.go new file mode 100644 index 0000000000..f817d77598 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/integers.go @@ -0,0 +1,174 @@ +package msgp + +/* ---------------------------------- + integer encoding utilities + (inline-able) + + TODO(tinylib): there are faster, + albeit non-portable solutions + to the code below. implement + byteswap? 
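+
+   For example, putMint16(b, -2) writes the three bytes 0xd1 0xff 0xfe:
+   the mint16 marker followed by the value in big-endian order.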
+ ---------------------------------- */ + +func putMint64(b []byte, i int64) { + b[0] = mint64 + b[1] = byte(i >> 56) + b[2] = byte(i >> 48) + b[3] = byte(i >> 40) + b[4] = byte(i >> 32) + b[5] = byte(i >> 24) + b[6] = byte(i >> 16) + b[7] = byte(i >> 8) + b[8] = byte(i) +} + +func getMint64(b []byte) int64 { + return (int64(b[1]) << 56) | (int64(b[2]) << 48) | + (int64(b[3]) << 40) | (int64(b[4]) << 32) | + (int64(b[5]) << 24) | (int64(b[6]) << 16) | + (int64(b[7]) << 8) | (int64(b[8])) +} + +func putMint32(b []byte, i int32) { + b[0] = mint32 + b[1] = byte(i >> 24) + b[2] = byte(i >> 16) + b[3] = byte(i >> 8) + b[4] = byte(i) +} + +func getMint32(b []byte) int32 { + return (int32(b[1]) << 24) | (int32(b[2]) << 16) | (int32(b[3]) << 8) | (int32(b[4])) +} + +func putMint16(b []byte, i int16) { + b[0] = mint16 + b[1] = byte(i >> 8) + b[2] = byte(i) +} + +func getMint16(b []byte) (i int16) { + return (int16(b[1]) << 8) | int16(b[2]) +} + +func putMint8(b []byte, i int8) { + b[0] = mint8 + b[1] = byte(i) +} + +func getMint8(b []byte) (i int8) { + return int8(b[1]) +} + +func putMuint64(b []byte, u uint64) { + b[0] = muint64 + b[1] = byte(u >> 56) + b[2] = byte(u >> 48) + b[3] = byte(u >> 40) + b[4] = byte(u >> 32) + b[5] = byte(u >> 24) + b[6] = byte(u >> 16) + b[7] = byte(u >> 8) + b[8] = byte(u) +} + +func getMuint64(b []byte) uint64 { + return (uint64(b[1]) << 56) | (uint64(b[2]) << 48) | + (uint64(b[3]) << 40) | (uint64(b[4]) << 32) | + (uint64(b[5]) << 24) | (uint64(b[6]) << 16) | + (uint64(b[7]) << 8) | (uint64(b[8])) +} + +func putMuint32(b []byte, u uint32) { + b[0] = muint32 + b[1] = byte(u >> 24) + b[2] = byte(u >> 16) + b[3] = byte(u >> 8) + b[4] = byte(u) +} + +func getMuint32(b []byte) uint32 { + return (uint32(b[1]) << 24) | (uint32(b[2]) << 16) | (uint32(b[3]) << 8) | (uint32(b[4])) +} + +func putMuint16(b []byte, u uint16) { + b[0] = muint16 + b[1] = byte(u >> 8) + b[2] = byte(u) +} + +func getMuint16(b []byte) uint16 { + return (uint16(b[1]) << 8) | uint16(b[2]) +} + +func putMuint8(b []byte, u uint8) { + b[0] = muint8 + b[1] = byte(u) +} + +func getMuint8(b []byte) uint8 { + return uint8(b[1]) +} + +func getUnix(b []byte) (sec int64, nsec int32) { + sec = (int64(b[0]) << 56) | (int64(b[1]) << 48) | + (int64(b[2]) << 40) | (int64(b[3]) << 32) | + (int64(b[4]) << 24) | (int64(b[5]) << 16) | + (int64(b[6]) << 8) | (int64(b[7])) + + nsec = (int32(b[8]) << 24) | (int32(b[9]) << 16) | (int32(b[10]) << 8) | (int32(b[11])) + return +} + +func putUnix(b []byte, sec int64, nsec int32) { + b[0] = byte(sec >> 56) + b[1] = byte(sec >> 48) + b[2] = byte(sec >> 40) + b[3] = byte(sec >> 32) + b[4] = byte(sec >> 24) + b[5] = byte(sec >> 16) + b[6] = byte(sec >> 8) + b[7] = byte(sec) + b[8] = byte(nsec >> 24) + b[9] = byte(nsec >> 16) + b[10] = byte(nsec >> 8) + b[11] = byte(nsec) +} + +/* ----------------------------- + prefix utilities + ----------------------------- */ + +// write prefix and uint8 +func prefixu8(b []byte, pre byte, sz uint8) { + b[0] = pre + b[1] = byte(sz) +} + +// write prefix and big-endian uint16 +func prefixu16(b []byte, pre byte, sz uint16) { + b[0] = pre + b[1] = byte(sz >> 8) + b[2] = byte(sz) +} + +// write prefix and big-endian uint32 +func prefixu32(b []byte, pre byte, sz uint32) { + b[0] = pre + b[1] = byte(sz >> 24) + b[2] = byte(sz >> 16) + b[3] = byte(sz >> 8) + b[4] = byte(sz) +} + +func prefixu64(b []byte, pre byte, sz uint64) { + b[0] = pre + b[1] = byte(sz >> 56) + b[2] = byte(sz >> 48) + b[3] = byte(sz >> 40) + b[4] = byte(sz >> 32) + b[5] = byte(sz >> 
24) + b[6] = byte(sz >> 16) + b[7] = byte(sz >> 8) + b[8] = byte(sz) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json.go b/vendor/github.com/tinylib/msgp/msgp/json.go new file mode 100644 index 0000000000..4325860ada --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json.go @@ -0,0 +1,542 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "unicode/utf8" +) + +var ( + null = []byte("null") + hex = []byte("0123456789abcdef") +) + +var defuns [_maxtype]func(jsWriter, *Reader) (int, error) + +// note: there is an initialization loop if +// this isn't set up during init() +func init() { + // since none of these functions are inline-able, + // there is not much of a penalty to the indirect + // call. however, this is best expressed as a jump-table... + defuns = [_maxtype]func(jsWriter, *Reader) (int, error){ + StrType: rwString, + BinType: rwBytes, + MapType: rwMap, + ArrayType: rwArray, + Float64Type: rwFloat64, + Float32Type: rwFloat32, + BoolType: rwBool, + IntType: rwInt, + UintType: rwUint, + NilType: rwNil, + ExtensionType: rwExtension, + Complex64Type: rwExtension, + Complex128Type: rwExtension, + TimeType: rwTime, + } +} + +// this is the interface +// used to write json +type jsWriter interface { + io.Writer + io.ByteWriter + WriteString(string) (int, error) +} + +// CopyToJSON reads MessagePack from 'src' and copies it +// as JSON to 'dst' until EOF. +func CopyToJSON(dst io.Writer, src io.Reader) (n int64, err error) { + r := NewReader(src) + n, err = r.WriteToJSON(dst) + freeR(r) + return +} + +// WriteToJSON translates MessagePack from 'r' and writes it as +// JSON to 'w' until the underlying reader returns io.EOF. It returns +// the number of bytes written, and an error if it stopped before EOF. 
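+//
+// A rough usage sketch (the input and output streams are illustrative
+// assumptions):
+//
+//	r := msgp.NewReader(msgpackInput)  // msgpackInput is any io.Reader of MessagePack
+//	n, err := r.WriteToJSON(os.Stdout) // stream the equivalent JSON to stdout
+//	_ = n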
+func (r *Reader) WriteToJSON(w io.Writer) (n int64, err error) { + var j jsWriter + var bf *bufio.Writer + if jsw, ok := w.(jsWriter); ok { + j = jsw + } else { + bf = bufio.NewWriter(w) + j = bf + } + var nn int + for err == nil { + nn, err = rwNext(j, r) + n += int64(nn) + } + if err != io.EOF { + if bf != nil { + bf.Flush() + } + return + } + err = nil + if bf != nil { + err = bf.Flush() + } + return +} + +func rwNext(w jsWriter, src *Reader) (int, error) { + t, err := src.NextType() + if err != nil { + return 0, err + } + return defuns[t](w, src) +} + +func rwMap(dst jsWriter, src *Reader) (n int, err error) { + var comma bool + var sz uint32 + var field []byte + + sz, err = src.ReadMapHeader() + if err != nil { + return + } + + if sz == 0 { + return dst.WriteString("{}") + } + + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + var nn int + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + + field, err = src.ReadMapKeyPtr() + if err != nil { + return + } + nn, err = rwquoted(dst, field) + n += nn + if err != nil { + return + } + + err = dst.WriteByte(':') + if err != nil { + return + } + n++ + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + if !comma { + comma = true + } + } + + err = dst.WriteByte('}') + if err != nil { + return + } + n++ + return +} + +func rwArray(dst jsWriter, src *Reader) (n int, err error) { + err = dst.WriteByte('[') + if err != nil { + return + } + var sz uint32 + var nn int + sz, err = src.ReadArrayHeader() + if err != nil { + return + } + comma := false + for i := uint32(0); i < sz; i++ { + if comma { + err = dst.WriteByte(',') + if err != nil { + return + } + n++ + } + nn, err = rwNext(dst, src) + n += nn + if err != nil { + return + } + comma = true + } + + err = dst.WriteByte(']') + if err != nil { + return + } + n++ + return +} + +func rwNil(dst jsWriter, src *Reader) (int, error) { + err := src.ReadNil() + if err != nil { + return 0, err + } + return dst.Write(null) +} + +func rwFloat32(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat32() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], float64(f), 'f', -1, 64) + return dst.Write(src.scratch) +} + +func rwFloat64(dst jsWriter, src *Reader) (int, error) { + f, err := src.ReadFloat64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendFloat(src.scratch[:0], f, 'f', -1, 32) + return dst.Write(src.scratch) +} + +func rwInt(dst jsWriter, src *Reader) (int, error) { + i, err := src.ReadInt64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendInt(src.scratch[:0], i, 10) + return dst.Write(src.scratch) +} + +func rwUint(dst jsWriter, src *Reader) (int, error) { + u, err := src.ReadUint64() + if err != nil { + return 0, err + } + src.scratch = strconv.AppendUint(src.scratch[:0], u, 10) + return dst.Write(src.scratch) +} + +func rwBool(dst jsWriter, src *Reader) (int, error) { + b, err := src.ReadBool() + if err != nil { + return 0, err + } + if b { + return dst.WriteString("true") + } + return dst.WriteString("false") +} + +func rwTime(dst jsWriter, src *Reader) (int, error) { + t, err := src.ReadTime() + if err != nil { + return 0, err + } + bts, err := t.MarshalJSON() + if err != nil { + return 0, err + } + return dst.Write(bts) +} + +func rwExtension(dst jsWriter, src *Reader) (n int, err error) { + et, err := src.peekExtensionType() + if err != nil { + return 0, err + } + + // registered extensions can 
override + // the JSON encoding + if j, ok := extensionReg[et]; ok { + var bts []byte + e := j() + err = src.ReadExtension(e) + if err != nil { + return + } + bts, err = json.Marshal(e) + if err != nil { + return + } + return dst.Write(bts) + } + + e := RawExtension{} + e.Type = et + err = src.ReadExtension(&e) + if err != nil { + return + } + + var nn int + err = dst.WriteByte('{') + if err != nil { + return + } + n++ + + nn, err = dst.WriteString(`"type:"`) + n += nn + if err != nil { + return + } + + src.scratch = strconv.AppendInt(src.scratch[0:0], int64(e.Type), 10) + nn, err = dst.Write(src.scratch) + n += nn + if err != nil { + return + } + + nn, err = dst.WriteString(`,"data":"`) + n += nn + if err != nil { + return + } + + enc := base64.NewEncoder(base64.StdEncoding, dst) + + nn, err = enc.Write(e.Data) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + nn, err = dst.WriteString(`"}`) + n += nn + return +} + +func rwString(dst jsWriter, src *Reader) (n int, err error) { + var p []byte + p, err = src.R.Peek(1) + if err != nil { + return + } + lead := p[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + src.R.Skip(1) + goto write + } + + switch lead { + case mstr8: + p, err = src.R.Next(2) + if err != nil { + return + } + read = int(uint8(p[1])) + case mstr16: + p, err = src.R.Next(3) + if err != nil { + return + } + read = int(big.Uint16(p[1:])) + case mstr32: + p, err = src.R.Next(5) + if err != nil { + return + } + read = int(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +write: + p, err = src.R.Next(read) + if err != nil { + return + } + n, err = rwquoted(dst, p) + return +} + +func rwBytes(dst jsWriter, src *Reader) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + src.scratch, err = src.ReadBytes(src.scratch[:0]) + if err != nil { + return + } + enc := base64.NewEncoder(base64.StdEncoding, dst) + nn, err = enc.Write(src.scratch) + n += nn + if err != nil { + return + } + err = enc.Close() + if err != nil { + return + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} + +// Below (c) The Go Authors, 2009-2014 +// Subject to the BSD-style license found at http://golang.org +// +// see: encoding/json/encode.go:(*encodeState).stringbytes() +func rwquoted(dst jsWriter, s []byte) (n int, err error) { + var nn int + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { + i++ + continue + } + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + } + switch b { + case '\\', '"': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte(b) + if err != nil { + return + } + n++ + case '\n': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('n') + if err != nil { + return + } + n++ + case '\r': + err = dst.WriteByte('\\') + if err != nil { + return + } + n++ + err = dst.WriteByte('r') + if err != nil { + return + } + n++ + default: + nn, err = dst.WriteString(`\u00`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[b>>4]) + if err != nil { + return + } + n++ + err = dst.WriteByte(hex[b&0xF]) + if err != nil { + return + } + n++ + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError && size == 1 { + 
if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + nn, err = dst.WriteString(`\ufffd`) + n += nn + if err != nil { + return + } + i += size + start = i + continue + } + } + if c == '\u2028' || c == '\u2029' { + if start < i { + nn, err = dst.Write(s[start:i]) + n += nn + if err != nil { + return + } + nn, err = dst.WriteString(`\u202`) + n += nn + if err != nil { + return + } + err = dst.WriteByte(hex[c&0xF]) + if err != nil { + return + } + n++ + } + } + i += size + } + if start < len(s) { + nn, err = dst.Write(s[start:]) + n += nn + if err != nil { + return + } + } + err = dst.WriteByte('"') + if err != nil { + return + } + n++ + return +} diff --git a/vendor/github.com/tinylib/msgp/msgp/json_bytes.go b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go new file mode 100644 index 0000000000..438caf5392 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/json_bytes.go @@ -0,0 +1,363 @@ +package msgp + +import ( + "bufio" + "encoding/base64" + "encoding/json" + "io" + "strconv" + "time" +) + +var unfuns [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error) + +func init() { + + // NOTE(pmh): this is best expressed as a jump table, + // but gc doesn't do that yet. revisit post-go1.5. + unfuns = [_maxtype]func(jsWriter, []byte, []byte) ([]byte, []byte, error){ + StrType: rwStringBytes, + BinType: rwBytesBytes, + MapType: rwMapBytes, + ArrayType: rwArrayBytes, + Float64Type: rwFloat64Bytes, + Float32Type: rwFloat32Bytes, + BoolType: rwBoolBytes, + IntType: rwIntBytes, + UintType: rwUintBytes, + NilType: rwNullBytes, + ExtensionType: rwExtensionBytes, + Complex64Type: rwExtensionBytes, + Complex128Type: rwExtensionBytes, + TimeType: rwTimeBytes, + } +} + +// UnmarshalAsJSON takes raw messagepack and writes +// it as JSON to 'w'. If an error is returned, the +// bytes not translated will also be returned. If +// no errors are encountered, the length of the returned +// slice will be zero. 
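A companion sketch for the slice-based path (illustrative only, not part of the upstream source; the array bytes are hand-encoded): UnmarshalAsJSON consumes the input slice directly and returns whatever it could not translate.

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Hand-encoded MessagePack for [1, true]:
	// 0x92 = fixarray with two elements, 0x01 = fixint 1, 0xc3 = true.
	in := []byte{0x92, 0x01, 0xc3}
	var out bytes.Buffer
	left, err := msgp.UnmarshalAsJSON(&out, in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.String(), len(left)) // [1,true] 0
}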
+func UnmarshalAsJSON(w io.Writer, msg []byte) ([]byte, error) { + var ( + scratch []byte + cast bool + dst jsWriter + err error + ) + if jsw, ok := w.(jsWriter); ok { + dst = jsw + cast = true + } else { + dst = bufio.NewWriterSize(w, 512) + } + for len(msg) > 0 && err == nil { + msg, scratch, err = writeNext(dst, msg, scratch) + } + if !cast && err == nil { + err = dst.(*bufio.Writer).Flush() + } + return msg, err +} + +func writeNext(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + if len(msg) < 1 { + return msg, scratch, ErrShortBytes + } + t := getType(msg[0]) + if t == InvalidType { + return msg, scratch, InvalidPrefixError(msg[0]) + } + if t == ExtensionType { + et, err := peekExtension(msg) + if err != nil { + return nil, scratch, err + } + if et == TimeExtension { + t = TimeType + } + } + return unfuns[t](w, msg, scratch) +} + +func rwArrayBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadArrayHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('[') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte(']') + return msg, scratch, err +} + +func rwMapBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + sz, msg, err := ReadMapHeaderBytes(msg) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('{') + if err != nil { + return msg, scratch, err + } + for i := uint32(0); i < sz; i++ { + if i != 0 { + err = w.WriteByte(',') + if err != nil { + return msg, scratch, err + } + } + msg, scratch, err = rwMapKeyBytes(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte(':') + if err != nil { + return msg, scratch, err + } + msg, scratch, err = writeNext(w, msg, scratch) + if err != nil { + return msg, scratch, err + } + } + err = w.WriteByte('}') + return msg, scratch, err +} + +func rwMapKeyBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, scratch, err := rwStringBytes(w, msg, scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return rwBytesBytes(w, msg, scratch) + } + } + return msg, scratch, err +} + +func rwStringBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + str, msg, err := ReadStringZC(msg) + if err != nil { + return msg, scratch, err + } + _, err = rwquoted(w, str) + return msg, scratch, err +} + +func rwBytesBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + bts, msg, err := ReadBytesZC(msg) + if err != nil { + return msg, scratch, err + } + l := base64.StdEncoding.EncodedLen(len(bts)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, bts) + err = w.WriteByte('"') + if err != nil { + return msg, scratch, err + } + _, err = w.Write(scratch) + if err != nil { + return msg, scratch, err + } + err = w.WriteByte('"') + return msg, scratch, err +} + +func rwNullBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + msg, err := ReadNilBytes(msg) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(null) + return msg, scratch, err +} + +func rwBoolBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + b, msg, err := 
ReadBoolBytes(msg) + if err != nil { + return msg, scratch, err + } + if b { + _, err = w.WriteString("true") + return msg, scratch, err + } + _, err = w.WriteString("false") + return msg, scratch, err +} + +func rwIntBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + i, msg, err := ReadInt64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], i, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwUintBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + u, msg, err := ReadUint64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendUint(scratch[0:0], u, 10) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloatBytes(w jsWriter, msg []byte, f64 bool, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + var sz int + if f64 { + sz = 64 + f, msg, err = ReadFloat64Bytes(msg) + } else { + sz = 32 + var v float32 + v, msg, err = ReadFloat32Bytes(msg) + f = float64(v) + } + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch, f, 'f', -1, sz) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat32Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float32 + var err error + f, msg, err = ReadFloat32Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], float64(f), 'f', -1, 32) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwFloat64Bytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var f float64 + var err error + f, msg, err = ReadFloat64Bytes(msg) + if err != nil { + return msg, scratch, err + } + scratch = strconv.AppendFloat(scratch[:0], f, 'f', -1, 64) + _, err = w.Write(scratch) + return msg, scratch, err +} + +func rwTimeBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var t time.Time + var err error + t, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := t.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err +} + +func rwExtensionBytes(w jsWriter, msg []byte, scratch []byte) ([]byte, []byte, error) { + var err error + var et int8 + et, err = peekExtension(msg) + if err != nil { + return msg, scratch, err + } + + // if it's time.Time + if et == TimeExtension { + var tm time.Time + tm, msg, err = ReadTimeBytes(msg) + if err != nil { + return msg, scratch, err + } + bts, err := tm.MarshalJSON() + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // if the extension is registered, + // use its canonical JSON form + if f, ok := extensionReg[et]; ok { + e := f() + msg, err = ReadExtensionBytes(msg, e) + if err != nil { + return msg, scratch, err + } + bts, err := json.Marshal(e) + if err != nil { + return msg, scratch, err + } + _, err = w.Write(bts) + return msg, scratch, err + } + + // otherwise, write `{"type": , "data": ""}` + r := RawExtension{} + r.Type = et + msg, err = ReadExtensionBytes(msg, &r) + if err != nil { + return msg, scratch, err + } + scratch, err = writeExt(w, r, scratch) + return msg, scratch, err +} + +func writeExt(w jsWriter, r RawExtension, scratch []byte) ([]byte, error) { + _, err := w.WriteString(`{"type":`) + if err != nil { + return scratch, err + } + scratch = strconv.AppendInt(scratch[0:0], int64(r.Type), 10) + _, err = 
w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`,"data":"`) + if err != nil { + return scratch, err + } + l := base64.StdEncoding.EncodedLen(len(r.Data)) + if cap(scratch) >= l { + scratch = scratch[0:l] + } else { + scratch = make([]byte, l) + } + base64.StdEncoding.Encode(scratch, r.Data) + _, err = w.Write(scratch) + if err != nil { + return scratch, err + } + _, err = w.WriteString(`"}`) + return scratch, err +} diff --git a/vendor/github.com/tinylib/msgp/msgp/number.go b/vendor/github.com/tinylib/msgp/msgp/number.go new file mode 100644 index 0000000000..ad07ef9958 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/number.go @@ -0,0 +1,267 @@ +package msgp + +import ( + "math" + "strconv" +) + +// The portable parts of the Number implementation + +// Number can be +// an int64, uint64, float32, +// or float64 internally. +// It can decode itself +// from any of the native +// messagepack number types. +// The zero-value of Number +// is Int(0). Using the equality +// operator with Number compares +// both the type and the value +// of the number. +type Number struct { + // internally, this + // is just a tagged union. + // the raw bits of the number + // are stored the same way regardless. + bits uint64 + typ Type +} + +// AsInt sets the number to an int64. +func (n *Number) AsInt(i int64) { + + // we always store int(0) + // as {0, InvalidType} in + // order to preserve + // the behavior of the == operator + if i == 0 { + n.typ = InvalidType + n.bits = 0 + return + } + + n.typ = IntType + n.bits = uint64(i) +} + +// AsUint sets the number to a uint64. +func (n *Number) AsUint(u uint64) { + n.typ = UintType + n.bits = u +} + +// AsFloat32 sets the value of the number +// to a float32. +func (n *Number) AsFloat32(f float32) { + n.typ = Float32Type + n.bits = uint64(math.Float32bits(f)) +} + +// AsFloat64 sets the value of the +// number to a float64. +func (n *Number) AsFloat64(f float64) { + n.typ = Float64Type + n.bits = math.Float64bits(f) +} + +// Int casts the number as an int64, and +// returns whether or not that was the +// underlying type. +func (n *Number) Int() (int64, bool) { + return int64(n.bits), n.typ == IntType || n.typ == InvalidType +} + +// Uint casts the number as a uint64, and returns +// whether or not that was the underlying type. +func (n *Number) Uint() (uint64, bool) { + return n.bits, n.typ == UintType +} + +// Float casts the number to a float64, and +// returns whether or not that was the underlying +// type (either a float64 or a float32). +func (n *Number) Float() (float64, bool) { + switch n.typ { + case Float32Type: + return float64(math.Float32frombits(uint32(n.bits))), true + case Float64Type: + return math.Float64frombits(n.bits), true + default: + return 0.0, false + } +} + +// Type will return one of: +// Float64Type, Float32Type, UintType, or IntType. 
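A brief round-trip sketch (not part of the upstream file; it leans on the MarshalMsg and UnmarshalMsg methods defined just below): Number remembers which wire type it decoded from and reports it through Type.

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var n msgp.Number
	n.AsFloat64(2.5) // tag the union as a float64

	buf, _ := n.MarshalMsg(nil) // appends the MessagePack encoding

	var m msgp.Number
	if _, err := m.UnmarshalMsg(buf); err != nil {
		panic(err)
	}
	f, ok := m.Float()
	fmt.Println(m.Type(), f, ok) // float64 2.5 true
}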
+func (n *Number) Type() Type { + if n.typ == InvalidType { + return IntType + } + return n.typ +} + +// DecodeMsg implements msgp.Decodable +func (n *Number) DecodeMsg(r *Reader) error { + typ, err := r.NextType() + if err != nil { + return err + } + switch typ { + case Float32Type: + f, err := r.ReadFloat32() + if err != nil { + return err + } + n.AsFloat32(f) + return nil + case Float64Type: + f, err := r.ReadFloat64() + if err != nil { + return err + } + n.AsFloat64(f) + return nil + case IntType: + i, err := r.ReadInt64() + if err != nil { + return err + } + n.AsInt(i) + return nil + case UintType: + u, err := r.ReadUint64() + if err != nil { + return err + } + n.AsUint(u) + return nil + default: + return TypeError{Encoded: typ, Method: IntType} + } +} + +// UnmarshalMsg implements msgp.Unmarshaler +func (n *Number) UnmarshalMsg(b []byte) ([]byte, error) { + typ := NextType(b) + switch typ { + case IntType: + i, o, err := ReadInt64Bytes(b) + if err != nil { + return b, err + } + n.AsInt(i) + return o, nil + case UintType: + u, o, err := ReadUint64Bytes(b) + if err != nil { + return b, err + } + n.AsUint(u) + return o, nil + case Float64Type: + f, o, err := ReadFloat64Bytes(b) + if err != nil { + return b, err + } + n.AsFloat64(f) + return o, nil + case Float32Type: + f, o, err := ReadFloat32Bytes(b) + if err != nil { + return b, err + } + n.AsFloat32(f) + return o, nil + default: + return b, TypeError{Method: IntType, Encoded: typ} + } +} + +// MarshalMsg implements msgp.Marshaler +func (n *Number) MarshalMsg(b []byte) ([]byte, error) { + switch n.typ { + case IntType: + return AppendInt64(b, int64(n.bits)), nil + case UintType: + return AppendUint64(b, uint64(n.bits)), nil + case Float64Type: + return AppendFloat64(b, math.Float64frombits(n.bits)), nil + case Float32Type: + return AppendFloat32(b, math.Float32frombits(uint32(n.bits))), nil + default: + return AppendInt64(b, 0), nil + } +} + +// EncodeMsg implements msgp.Encodable +func (n *Number) EncodeMsg(w *Writer) error { + switch n.typ { + case IntType: + return w.WriteInt64(int64(n.bits)) + case UintType: + return w.WriteUint64(n.bits) + case Float64Type: + return w.WriteFloat64(math.Float64frombits(n.bits)) + case Float32Type: + return w.WriteFloat32(math.Float32frombits(uint32(n.bits))) + default: + return w.WriteInt64(0) + } +} + +// Msgsize implements msgp.Sizer +func (n *Number) Msgsize() int { + switch n.typ { + case Float32Type: + return Float32Size + case Float64Type: + return Float64Size + case IntType: + return Int64Size + case UintType: + return Uint64Size + default: + return 1 // fixint(0) + } +} + +// MarshalJSON implements json.Marshaler +func (n *Number) MarshalJSON() ([]byte, error) { + t := n.Type() + if t == InvalidType { + return []byte{'0'}, nil + } + out := make([]byte, 0, 32) + switch t { + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.AppendFloat(out, f, 'f', -1, 64), nil + case IntType: + i, _ := n.Int() + return strconv.AppendInt(out, i, 10), nil + case UintType: + u, _ := n.Uint() + return strconv.AppendUint(out, u, 10), nil + default: + panic("(*Number).typ is invalid") + } +} + +// String implements fmt.Stringer +func (n *Number) String() string { + switch n.typ { + case InvalidType: + return "0" + case Float32Type, Float64Type: + f, _ := n.Float() + return strconv.FormatFloat(f, 'f', -1, 64) + case IntType: + i, _ := n.Int() + return strconv.FormatInt(i, 10) + case UintType: + u, _ := n.Uint() + return strconv.FormatUint(u, 10) + default: + panic("(*Number).typ is invalid") + } 
+} diff --git a/vendor/github.com/tinylib/msgp/msgp/read.go b/vendor/github.com/tinylib/msgp/msgp/read.go new file mode 100644 index 0000000000..a493f941a8 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read.go @@ -0,0 +1,1213 @@ +package msgp + +import ( + "io" + "math" + "sync" + "time" + + "github.com/philhofer/fwd" +) + +// where we keep old *Readers +var readerPool = sync.Pool{New: func() interface{} { return &Reader{} }} + +// Type is a MessagePack wire type, +// including this package's built-in +// extension types. +type Type byte + +// MessagePack Types +// +// The zero value of Type +// is InvalidType. +const ( + InvalidType Type = iota + + // MessagePack built-in types + + StrType + BinType + MapType + ArrayType + Float64Type + Float32Type + BoolType + IntType + UintType + NilType + ExtensionType + + // pseudo-types provided + // by extensions + + Complex64Type + Complex128Type + TimeType + + _maxtype +) + +// String implements fmt.Stringer +func (t Type) String() string { + switch t { + case StrType: + return "str" + case BinType: + return "bin" + case MapType: + return "map" + case ArrayType: + return "array" + case Float64Type: + return "float64" + case Float32Type: + return "float32" + case BoolType: + return "bool" + case UintType: + return "uint" + case IntType: + return "int" + case ExtensionType: + return "ext" + case NilType: + return "nil" + default: + return "" + } +} + +func freeR(m *Reader) { + readerPool.Put(m) +} + +// Unmarshaler is the interface fulfilled +// by objects that know how to unmarshal +// themselves from MessagePack. +// UnmarshalMsg unmarshals the object +// from binary, returing any leftover +// bytes and any errors encountered. +type Unmarshaler interface { + UnmarshalMsg([]byte) ([]byte, error) +} + +// Decodable is the interface fulfilled +// by objects that know how to read +// themselves from a *Reader. +type Decodable interface { + DecodeMsg(*Reader) error +} + +// Decode decodes 'd' from 'r'. +func Decode(r io.Reader, d Decodable) error { + rd := NewReader(r) + err := d.DecodeMsg(rd) + freeR(rd) + return err +} + +// NewReader returns a *Reader that +// reads from the provided reader. The +// reader will be buffered. +func NewReader(r io.Reader) *Reader { + p := readerPool.Get().(*Reader) + if p.R == nil { + p.R = fwd.NewReader(r) + } else { + p.R.Reset(r) + } + return p +} + +// NewReaderSize returns a *Reader with a buffer of the given size. +// (This is vastly preferable to passing the decoder a reader that is already buffered.) +func NewReaderSize(r io.Reader, sz int) *Reader { + return &Reader{R: fwd.NewReaderSize(r, sz)} +} + +// Reader wraps an io.Reader and provides +// methods to read MessagePack-encoded values +// from it. Readers are buffered. +type Reader struct { + // R is the buffered reader + // that the Reader uses + // to decode MessagePack. + // The Reader itself + // is stateless; all the + // buffering is done + // within R. + R *fwd.Reader + scratch []byte +} + +// Read implements `io.Reader` +func (m *Reader) Read(p []byte) (int, error) { + return m.R.Read(p) +} + +// ReadFull implements `io.ReadFull` +func (m *Reader) ReadFull(p []byte) (int, error) { + return m.R.ReadFull(p) +} + +// Reset resets the underlying reader. +func (m *Reader) Reset(r io.Reader) { m.R.Reset(r) } + +// Buffered returns the number of bytes currently in the read buffer. +func (m *Reader) Buffered() int { return m.R.Buffered() } + +// BufferSize returns the capacity of the read buffer. 
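A small sketch of the streaming entry points above (illustrative, not part of the vendored source; the uint16 bytes are hand-encoded): Decode borrows a pooled *Reader and drives any Decodable, and Number from number.go already satisfies that interface.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// 0xcd is the MessagePack uint16 prefix; 0x01f4 is big-endian 500.
	src := bytes.NewReader([]byte{0xcd, 0x01, 0xf4})

	var n msgp.Number // implements Decodable via DecodeMsg
	if err := msgp.Decode(src, &n); err != nil {
		panic(err)
	}
	u, ok := n.Uint()
	fmt.Println(u, ok) // 500 true
}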
+func (m *Reader) BufferSize() int { return m.R.BufferSize() } + +// NextType returns the next object type to be decoded. +func (m *Reader) NextType() (Type, error) { + p, err := m.R.Peek(1) + if err != nil { + return InvalidType, err + } + t := getType(p[0]) + if t == InvalidType { + return t, InvalidPrefixError(p[0]) + } + if t == ExtensionType { + v, err := m.peekExtensionType() + if err != nil { + return InvalidType, err + } + switch v { + case Complex64Extension: + return Complex64Type, nil + case Complex128Extension: + return Complex128Type, nil + case TimeExtension: + return TimeType, nil + } + } + return t, nil +} + +// IsNil returns whether or not +// the next byte is a null messagepack byte +func (m *Reader) IsNil() bool { + p, err := m.R.Peek(1) + return err == nil && p[0] == mnil +} + +// returns (obj size, obj elements, error) +// only maps and arrays have non-zero obj elements +// +// use uintptr b/c it's guaranteed to be large enough +// to hold whatever we can fit in memory. +func getNextSize(r *fwd.Reader) (uintptr, uintptr, error) { + b, err := r.Peek(1) + if err != nil { + return 0, 0, err + } + lead := b[0] + spec := &sizes[lead] + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { + return uintptr(size), uintptr(mode), nil + } + b, err = r.Peek(int(size)) + if err != nil { + return 0, 0, err + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} + +// Skip skips over the next object, regardless of +// its type. If it is an array or map, the whole array +// or map will be skipped. +func (m *Reader) Skip() error { + var ( + v uintptr // bytes + o uintptr // objects + err error + p []byte + ) + + // we can use the faster + // method if we have enough + // buffered data + if m.R.Buffered() >= 5 { + p, err = m.R.Peek(5) + if err != nil { + return err + } + v, o, err = getSize(p) + if err != nil { + return err + } + } else { + v, o, err = getNextSize(m.R) + if err != nil { + return err + } + } + + // 'v' is always non-zero + // if err == nil + _, err = m.R.Skip(int(v)) + if err != nil { + return err + } + + // for maps and slices, skip elements + for x := uintptr(0); x < o; x++ { + err = m.Skip() + if err != nil { + return err + } + } + return nil +} + +// ReadMapHeader reads the next object +// as a map header and returns the size +// of the map and the number of bytes written. +// It will return a TypeError{} if the next +// object is not a map. 
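Before the implementation, a hand-rolled decoding loop as a sketch (not part of the vendored code; it also uses the map-key and integer readers defined further down): the header count bounds the iteration over key/value pairs.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Hand-encoded fixmap with two entries: "a" -> 1, "b" -> 2.
	in := []byte{0x82, 0xa1, 'a', 0x01, 0xa1, 'b', 0x02}
	r := msgp.NewReader(bytes.NewReader(in))

	sz, err := r.ReadMapHeader()
	if err != nil {
		panic(err)
	}
	for i := uint32(0); i < sz; i++ {
		key, _ := r.ReadMapKey(nil) // a reusable scratch slice may be passed instead of nil
		val, _ := r.ReadInt64()
		fmt.Printf("%s=%d\n", key, val) // a=1, then b=2
	}
}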
+func (m *Reader) ReadMapHeader() (sz uint32, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case mmap16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mmap32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKey reads either a 'str' or 'bin' field from +// the reader and returns the value as a []byte. It uses +// scratch for storage if it is large enough. +func (m *Reader) ReadMapKey(scratch []byte) ([]byte, error) { + out, err := m.ReadStringAsBytes(scratch) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return m.ReadBytes(scratch) + } + return nil, err + } + return out, nil +} + +// MapKeyPtr returns a []byte pointing to the contents +// of a valid map key. The key cannot be empty, and it +// must be shorter than the total buffer size of the +// *Reader. Additionally, the returned slice is only +// valid until the next *Reader method call. Users +// should exercise extreme care when using this +// method; writing into the returned slice may +// corrupt future reads. +func (m *Reader) ReadMapKeyPtr() ([]byte, error) { + p, err := m.R.Peek(1) + if err != nil { + return nil, err + } + lead := p[0] + var read int + if isfixstr(lead) { + read = int(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + switch lead { + case mstr8, mbin8: + p, err = m.R.Next(2) + if err != nil { + return nil, err + } + read = int(p[1]) + case mstr16, mbin16: + p, err = m.R.Next(3) + if err != nil { + return nil, err + } + read = int(big.Uint16(p[1:])) + case mstr32, mbin32: + p, err = m.R.Next(5) + if err != nil { + return nil, err + } + read = int(big.Uint32(p[1:])) + default: + return nil, badPrefix(StrType, lead) + } +fill: + if read == 0 { + return nil, ErrShortBytes + } + return m.R.Next(read) +} + +// ReadArrayHeader reads the next object as an +// array header and returns the size of the array +// and the number of bytes read. +func (m *Reader) ReadArrayHeader() (sz uint32, err error) { + var lead byte + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case marray16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + + case marray32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNil reads a 'nil' MessagePack byte from the reader +func (m *Reader) ReadNil() error { + p, err := m.R.Peek(1) + if err != nil { + return err + } + if p[0] != mnil { + return badPrefix(NilType, p[0]) + } + _, err = m.R.Skip(1) + return err +} + +// ReadFloat64 reads a float64 from the reader. +// (If the value on the wire is encoded as a float32, +// it will be up-cast to a float64.) 
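A sketch of the up-cast behavior described above (illustrative only; the float32 bytes are hand-encoded): a value written as float32 on the wire still decodes cleanly through ReadFloat64.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// 0xca = float32 prefix, 0x3fc00000 = IEEE-754 bits for 1.5.
	// The four trailing fixint zeros are extra, unread objects kept in the
	// stream only so the example does not end right at the float.
	in := []byte{0xca, 0x3f, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
	r := msgp.NewReader(bytes.NewReader(in))

	f, err := r.ReadFloat64() // up-casts the wire float32 to float64
	if err != nil {
		panic(err)
	}
	fmt.Println(f) // 1.5
}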
+func (m *Reader) ReadFloat64() (f float64, err error) { + var p []byte + p, err = m.R.Peek(9) + if err != nil { + // we'll allow a coversion from float32 to float64, + // since we don't lose any precision + if err == io.EOF && len(p) > 0 && p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + return + } + if p[0] != mfloat64 { + // see above + if p[0] == mfloat32 { + ef, err := m.ReadFloat32() + return float64(ef), err + } + err = badPrefix(Float64Type, p[0]) + return + } + f = math.Float64frombits(getMuint64(p)) + _, err = m.R.Skip(9) + return +} + +// ReadFloat32 reads a float32 from the reader +func (m *Reader) ReadFloat32() (f float32, err error) { + var p []byte + p, err = m.R.Peek(5) + if err != nil { + return + } + if p[0] != mfloat32 { + err = badPrefix(Float32Type, p[0]) + return + } + f = math.Float32frombits(getMuint32(p)) + _, err = m.R.Skip(5) + return +} + +// ReadBool reads a bool from the reader +func (m *Reader) ReadBool() (b bool, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mtrue: + b = true + case mfalse: + default: + err = badPrefix(BoolType, p[0]) + return + } + _, err = m.R.Skip(1) + return +} + +// ReadInt64 reads an int64 from the reader +func (m *Reader) ReadInt64() (i int64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixint(lead) { + i = int64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } else if isnfixint(lead) { + i = int64(rnfixint(lead)) + _, err = m.R.Skip(1) + return + } + + switch lead { + case mint8: + p, err = m.R.Next(2) + if err != nil { + return + } + i = int64(getMint8(p)) + return + + case mint16: + p, err = m.R.Next(3) + if err != nil { + return + } + i = int64(getMint16(p)) + return + + case mint32: + p, err = m.R.Next(5) + if err != nil { + return + } + i = int64(getMint32(p)) + return + + case mint64: + p, err = m.R.Next(9) + if err != nil { + return + } + i = getMint64(p) + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32 reads an int32 from the reader +func (m *Reader) ReadInt32() (i int32, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt32 || in < math.MinInt32 { + err = IntOverflow{Value: in, FailedBitsize: 32} + return + } + i = int32(in) + return +} + +// ReadInt16 reads an int16 from the reader +func (m *Reader) ReadInt16() (i int16, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt16 || in < math.MinInt16 { + err = IntOverflow{Value: in, FailedBitsize: 16} + return + } + i = int16(in) + return +} + +// ReadInt8 reads an int8 from the reader +func (m *Reader) ReadInt8() (i int8, err error) { + var in int64 + in, err = m.ReadInt64() + if in > math.MaxInt8 || in < math.MinInt8 { + err = IntOverflow{Value: in, FailedBitsize: 8} + return + } + i = int8(in) + return +} + +// ReadInt reads an int from the reader +func (m *Reader) ReadInt() (i int, err error) { + if smallint { + var in int32 + in, err = m.ReadInt32() + i = int(in) + return + } + var in int64 + in, err = m.ReadInt64() + i = int(in) + return +} + +// ReadUint64 reads a uint64 from the reader +func (m *Reader) ReadUint64() (u uint64, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + _, err = m.R.Skip(1) + return + } + switch lead { + case muint8: + p, err = m.R.Next(2) + if err != nil { + return + } + u = 
uint64(getMuint8(p)) + return + + case muint16: + p, err = m.R.Next(3) + if err != nil { + return + } + u = uint64(getMuint16(p)) + return + + case muint32: + p, err = m.R.Next(5) + if err != nil { + return + } + u = uint64(getMuint32(p)) + return + + case muint64: + p, err = m.R.Next(9) + if err != nil { + return + } + u = getMuint64(p) + return + + default: + err = badPrefix(UintType, lead) + return + + } +} + +// ReadUint32 reads a uint32 from the reader +func (m *Reader) ReadUint32() (u uint32, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint32 { + err = UintOverflow{Value: in, FailedBitsize: 32} + return + } + u = uint32(in) + return +} + +// ReadUint16 reads a uint16 from the reader +func (m *Reader) ReadUint16() (u uint16, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint16 { + err = UintOverflow{Value: in, FailedBitsize: 16} + return + } + u = uint16(in) + return +} + +// ReadUint8 reads a uint8 from the reader +func (m *Reader) ReadUint8() (u uint8, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + u = uint8(in) + return +} + +// ReadUint reads a uint from the reader +func (m *Reader) ReadUint() (u uint, err error) { + if smallint { + var un uint32 + un, err = m.ReadUint32() + u = uint(un) + return + } + var un uint64 + un, err = m.ReadUint64() + u = uint(un) + return +} + +// ReadByte is analogous to ReadUint8. +// +// NOTE: this is *not* an implementation +// of io.ByteReader. +func (m *Reader) ReadByte() (b byte, err error) { + var in uint64 + in, err = m.ReadUint64() + if in > math.MaxUint8 { + err = UintOverflow{Value: in, FailedBitsize: 8} + return + } + b = byte(in) + return +} + +// ReadBytes reads a MessagePack 'bin' object +// from the reader and returns its value. It may +// use 'scratch' for storage if it is non-nil. +func (m *Reader) ReadBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(2) + if err != nil { + return + } + lead = p[0] + var read int64 + switch lead { + case mbin8: + read = int64(p[1]) + m.R.Skip(2) + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(BinType, lead) + return + } + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadBytesHeader reads the size header +// of a MessagePack 'bin' object. The user +// is responsible for dealing with the next +// 'sz' bytes from the reader in an application-specific +// way. +func (m *Reader) ReadBytesHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + switch p[0] { + case mbin8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mbin16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mbin32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = uint32(big.Uint32(p[1:])) + return + default: + err = badPrefix(BinType, p[0]) + return + } +} + +// ReadExactBytes reads a MessagePack 'bin'-encoded +// object off of the wire into the provided slice. An +// ArrayError will be returned if the object is not +// exactly the length of the input slice. 
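A sketch of scratch reuse (illustrative, not part of the vendored file; the bin8 object is hand-encoded): when the provided slice has enough capacity, ReadBytes fills it instead of allocating a new one.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// 0xc4 = bin8 prefix, length 3, then the payload bytes 0x01 0x02 0x03.
	r := msgp.NewReader(bytes.NewReader([]byte{0xc4, 0x03, 0x01, 0x02, 0x03}))

	scratch := make([]byte, 0, 64) // large enough, so ReadBytes reuses it
	b, err := r.ReadBytes(scratch)
	if err != nil {
		panic(err)
	}
	fmt.Println(b) // [1 2 3]
}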
+func (m *Reader) ReadExactBytes(into []byte) error { + p, err := m.R.Peek(2) + if err != nil { + return err + } + lead := p[0] + var read int64 // bytes to read + var skip int // prefix size to skip + switch lead { + case mbin8: + read = int64(p[1]) + skip = 2 + case mbin16: + p, err = m.R.Peek(3) + if err != nil { + return err + } + read = int64(big.Uint16(p[1:])) + skip = 3 + case mbin32: + p, err = m.R.Peek(5) + if err != nil { + return err + } + read = int64(big.Uint32(p[1:])) + skip = 5 + default: + return badPrefix(BinType, lead) + } + if read != int64(len(into)) { + return ArrayError{Wanted: uint32(len(into)), Got: uint32(read)} + } + m.R.Skip(skip) + _, err = m.R.ReadFull(into) + return err +} + +// ReadStringAsBytes reads a MessagePack 'str' (utf-8) string +// and returns its value as bytes. It may use 'scratch' for storage +// if it is non-nil. +func (m *Reader) ReadStringAsBytes(scratch []byte) (b []byte, err error) { + var p []byte + var lead byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + var read int64 + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if int64(cap(scratch)) < read { + b = make([]byte, read) + } else { + b = scratch[0:read] + } + _, err = m.R.ReadFull(b) + return +} + +// ReadStringHeader reads a string header +// off of the wire. The user is then responsible +// for dealing with the next 'sz' bytes from +// the reader in an application-specific manner. +func (m *Reader) ReadStringHeader() (sz uint32, err error) { + var p []byte + p, err = m.R.Peek(1) + if err != nil { + return + } + lead := p[0] + if isfixstr(lead) { + sz = uint32(rfixstr(lead)) + m.R.Skip(1) + return + } + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + sz = uint32(p[1]) + return + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + sz = uint32(big.Uint16(p[1:])) + return + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + sz = big.Uint32(p[1:]) + return + default: + err = badPrefix(StrType, lead) + return + } +} + +// ReadString reads a utf-8 string from the reader +func (m *Reader) ReadString() (s string, err error) { + var p []byte + var lead byte + var read int64 + p, err = m.R.Peek(1) + if err != nil { + return + } + lead = p[0] + + if isfixstr(lead) { + read = int64(rfixstr(lead)) + m.R.Skip(1) + goto fill + } + + switch lead { + case mstr8: + p, err = m.R.Next(2) + if err != nil { + return + } + read = int64(uint8(p[1])) + case mstr16: + p, err = m.R.Next(3) + if err != nil { + return + } + read = int64(big.Uint16(p[1:])) + case mstr32: + p, err = m.R.Next(5) + if err != nil { + return + } + read = int64(big.Uint32(p[1:])) + default: + err = badPrefix(StrType, lead) + return + } +fill: + if read == 0 { + s, err = "", nil + return + } + // reading into the memory + // that will become the string + // itself has vastly superior + // worst-case performance, because + // the reader buffer doesn't have + // to be large enough to hold the string. 
+ // the idea here is to make it more + // difficult for someone malicious + // to cause the system to run out of + // memory by sending very large strings. + // + // NOTE: this works because the argument + // passed to (*fwd.Reader).ReadFull escapes + // to the heap; its argument may, in turn, + // be passed to the underlying reader, and + // thus escape analysis *must* conclude that + // 'out' escapes. + out := make([]byte, read) + _, err = m.R.ReadFull(out) + if err != nil { + return + } + s = UnsafeString(out) + return +} + +// ReadComplex64 reads a complex64 from the reader +func (m *Reader) ReadComplex64() (f complex64, err error) { + var p []byte + p, err = m.R.Peek(10) + if err != nil { + return + } + if p[0] != mfixext8 { + err = badPrefix(Complex64Type, p[0]) + return + } + if int8(p[1]) != Complex64Extension { + err = errExt(int8(p[1]), Complex64Extension) + return + } + f = complex(math.Float32frombits(big.Uint32(p[2:])), + math.Float32frombits(big.Uint32(p[6:]))) + _, err = m.R.Skip(10) + return +} + +// ReadComplex128 reads a complex128 from the reader +func (m *Reader) ReadComplex128() (f complex128, err error) { + var p []byte + p, err = m.R.Peek(18) + if err != nil { + return + } + if p[0] != mfixext16 { + err = badPrefix(Complex128Type, p[0]) + return + } + if int8(p[1]) != Complex128Extension { + err = errExt(int8(p[1]), Complex128Extension) + return + } + f = complex(math.Float64frombits(big.Uint64(p[2:])), + math.Float64frombits(big.Uint64(p[10:]))) + _, err = m.R.Skip(18) + return +} + +// ReadMapStrIntf reads a MessagePack map into a map[string]interface{}. +// (You must pass a non-nil map into the function.) +func (m *Reader) ReadMapStrIntf(mp map[string]interface{}) (err error) { + var sz uint32 + sz, err = m.ReadMapHeader() + if err != nil { + return + } + for key := range mp { + delete(mp, key) + } + for i := uint32(0); i < sz; i++ { + var key string + var val interface{} + key, err = m.ReadString() + if err != nil { + return + } + val, err = m.ReadIntf() + if err != nil { + return + } + mp[key] = val + } + return +} + +// ReadTime reads a time.Time object from the reader. +// The returned time's location will be set to time.Local. +func (m *Reader) ReadTime() (t time.Time, err error) { + var p []byte + p, err = m.R.Peek(15) + if err != nil { + return + } + if p[0] != mext8 || p[1] != 12 { + err = badPrefix(TimeType, p[0]) + return + } + if int8(p[2]) != TimeExtension { + err = errExt(int8(p[2]), TimeExtension) + return + } + sec, nsec := getUnix(p[3:]) + t = time.Unix(sec, int64(nsec)).Local() + _, err = m.R.Skip(15) + return +} + +// ReadIntf reads out the next object as a raw interface{}. +// Arrays are decoded as []interface{}, and maps are decoded +// as map[string]interface{}. Integers are decoded as int64 +// and unsigned integers are decoded as uint64. 
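A sketch of schema-less decoding (not part of the vendored source; the map bytes are hand-encoded): ReadIntf, defined below, materializes Go-native values without any generated code.

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Hand-encoded fixmap {"ok": true}: 0x81, fixstr "ok", 0xc3.
	in := []byte{0x81, 0xa2, 'o', 'k', 0xc3}
	r := msgp.NewReader(bytes.NewReader(in))

	v, err := r.ReadIntf()
	if err != nil {
		panic(err)
	}
	m := v.(map[string]interface{}) // maps decode as map[string]interface{}
	fmt.Println(m["ok"])            // true
}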
+func (m *Reader) ReadIntf() (i interface{}, err error) { + var t Type + t, err = m.NextType() + if err != nil { + return + } + switch t { + case BoolType: + i, err = m.ReadBool() + return + + case IntType: + i, err = m.ReadInt64() + return + + case UintType: + i, err = m.ReadUint64() + return + + case BinType: + i, err = m.ReadBytes(nil) + return + + case StrType: + i, err = m.ReadString() + return + + case Complex64Type: + i, err = m.ReadComplex64() + return + + case Complex128Type: + i, err = m.ReadComplex128() + return + + case TimeType: + i, err = m.ReadTime() + return + + case ExtensionType: + var t int8 + t, err = m.peekExtensionType() + if err != nil { + return + } + f, ok := extensionReg[t] + if ok { + e := f() + err = m.ReadExtension(e) + i = e + return + } + var e RawExtension + e.Type = t + err = m.ReadExtension(&e) + i = &e + return + + case MapType: + mp := make(map[string]interface{}) + err = m.ReadMapStrIntf(mp) + i = mp + return + + case NilType: + err = m.ReadNil() + i = nil + return + + case Float32Type: + i, err = m.ReadFloat32() + return + + case Float64Type: + i, err = m.ReadFloat64() + return + + case ArrayType: + var sz uint32 + sz, err = m.ReadArrayHeader() + + if err != nil { + return + } + out := make([]interface{}, int(sz)) + for j := range out { + out[j], err = m.ReadIntf() + if err != nil { + return + } + } + i = out + return + + default: + return nil, fatal // unreachable + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/read_bytes.go b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go new file mode 100644 index 0000000000..78e466fc1f --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/read_bytes.go @@ -0,0 +1,1089 @@ +package msgp + +import ( + "bytes" + "encoding/binary" + "math" + "time" +) + +var big = binary.BigEndian + +// NextType returns the type of the next +// object in the slice. If the length +// of the input is zero, it returns +// InvalidType. +func NextType(b []byte) Type { + if len(b) == 0 { + return InvalidType + } + spec := sizes[b[0]] + t := spec.typ + if t == ExtensionType && len(b) > int(spec.size) { + var tp int8 + if spec.extra == constsize { + tp = int8(b[1]) + } else { + tp = int8(b[spec.size-1]) + } + switch tp { + case TimeExtension: + return TimeType + case Complex128Extension: + return Complex128Type + case Complex64Extension: + return Complex64Type + default: + return ExtensionType + } + } + return t +} + +// IsNil returns true if len(b)>0 and +// the leading byte is a 'nil' MessagePack +// byte; false otherwise +func IsNil(b []byte) bool { + if len(b) != 0 && b[0] == mnil { + return true + } + return false +} + +// Raw is raw MessagePack. +// Raw allows you to read and write +// data without interpreting its contents. +type Raw []byte + +// MarshalMsg implements msgp.Marshaler. +// It appends the raw contents of 'raw' +// to the provided byte slice. If 'raw' +// is 0 bytes, 'nil' will be appended instead. +func (r Raw) MarshalMsg(b []byte) ([]byte, error) { + i := len(r) + if i == 0 { + return AppendNil(b), nil + } + o, l := ensure(b, i) + copy(o[l:], []byte(r)) + return o, nil +} + +// UnmarshalMsg implements msgp.Unmarshaler. +// It sets the contents of *Raw to be the next +// object in the provided byte slice. 
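A sketch of deferred decoding with Raw (illustrative, not part of the upstream file; the input bytes are hand-encoded): UnmarshalMsg, defined next, copies exactly one object and leaves the remainder of the slice untouched.

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Two back-to-back objects: fixint 7, then fixstr "x".
	in := []byte{0x07, 0xa1, 'x'}

	var raw msgp.Raw
	rest, err := raw.UnmarshalMsg(in) // captures only the fixint
	if err != nil {
		panic(err)
	}
	js, _ := raw.MarshalJSON()         // renders the captured object as JSON
	fmt.Println(string(js), len(rest)) // 7 2
}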
+func (r *Raw) UnmarshalMsg(b []byte) ([]byte, error) { + l := len(b) + out, err := Skip(b) + if err != nil { + return b, err + } + rlen := l - len(out) + if cap(*r) < rlen { + *r = make(Raw, rlen) + } else { + *r = (*r)[0:rlen] + } + copy(*r, b[:rlen]) + return out, nil +} + +// EncodeMsg implements msgp.Encodable. +// It writes the raw bytes to the writer. +// If r is empty, it writes 'nil' instead. +func (r Raw) EncodeMsg(w *Writer) error { + if len(r) == 0 { + return w.WriteNil() + } + _, err := w.Write([]byte(r)) + return err +} + +// DecodeMsg implements msgp.Decodable. +// It sets the value of *Raw to be the +// next object on the wire. +func (r *Raw) DecodeMsg(f *Reader) error { + *r = (*r)[:0] + return appendNext(f, (*[]byte)(r)) +} + +// Msgsize implements msgp.Sizer +func (r Raw) Msgsize() int { + l := len(r) + if l == 0 { + return 1 // for 'nil' + } + return l +} + +func appendNext(f *Reader, d *[]byte) error { + amt, o, err := getNextSize(f.R) + if err != nil { + return err + } + var i int + *d, i = ensure(*d, int(amt)) + _, err = f.R.ReadFull((*d)[i:]) + if err != nil { + return err + } + for o > 0 { + err = appendNext(f, d) + if err != nil { + return err + } + o-- + } + return nil +} + +// MarshalJSON implements json.Marshaler +func (r *Raw) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + _, err := UnmarshalAsJSON(&buf, []byte(*r)) + return buf.Bytes(), err +} + +// ReadMapHeaderBytes reads a map header size +// from 'b' and returns the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a map) +func ReadMapHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + if isfixmap(lead) { + sz = uint32(rfixmap(lead)) + o = b[1:] + return + } + + switch lead { + case mmap16: + if l < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case mmap32: + if l < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(MapType, lead) + return + } +} + +// ReadMapKeyZC attempts to read a map key +// from 'b' and returns the key bytes and the remaining bytes +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a str or bin) +func ReadMapKeyZC(b []byte) ([]byte, []byte, error) { + o, b, err := ReadStringZC(b) + if err != nil { + if tperr, ok := err.(TypeError); ok && tperr.Encoded == BinType { + return ReadBytesZC(b) + } + return nil, b, err + } + return o, b, nil +} + +// ReadArrayHeaderBytes attempts to read +// the array header size off of 'b' and return +// the size and remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not an array) +func ReadArrayHeaderBytes(b []byte) (sz uint32, o []byte, err error) { + if len(b) < 1 { + return 0, nil, ErrShortBytes + } + lead := b[0] + if isfixarray(lead) { + sz = uint32(rfixarray(lead)) + o = b[1:] + return + } + + switch lead { + case marray16: + if len(b) < 3 { + err = ErrShortBytes + return + } + sz = uint32(big.Uint16(b[1:])) + o = b[3:] + return + + case marray32: + if len(b) < 5 { + err = ErrShortBytes + return + } + sz = big.Uint32(b[1:]) + o = b[5:] + return + + default: + err = badPrefix(ArrayType, lead) + return + } +} + +// ReadNilBytes tries to read a "nil" byte +// off of 'b' and return the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'nil') +// - InvalidPrefixError +func ReadNilBytes(b []byte) ([]byte, error) { + if len(b) < 1 { + return nil, ErrShortBytes + } + if b[0] != mnil { + return b, badPrefix(NilType, b[0]) + } + return b[1:], nil +} + +// ReadFloat64Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float64) +func ReadFloat64Bytes(b []byte) (f float64, o []byte, err error) { + if len(b) < 9 { + if len(b) >= 5 && b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = ErrShortBytes + return + } + + if b[0] != mfloat64 { + if b[0] == mfloat32 { + var tf float32 + tf, o, err = ReadFloat32Bytes(b) + f = float64(tf) + return + } + err = badPrefix(Float64Type, b[0]) + return + } + + f = math.Float64frombits(getMuint64(b)) + o = b[9:] + return +} + +// ReadFloat32Bytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a float32) +func ReadFloat32Bytes(b []byte) (f float32, o []byte, err error) { + if len(b) < 5 { + err = ErrShortBytes + return + } + + if b[0] != mfloat32 { + err = TypeError{Method: Float32Type, Encoded: getType(b[0])} + return + } + + f = math.Float32frombits(getMuint32(b)) + o = b[5:] + return +} + +// ReadBoolBytes tries to read a float64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a bool) +func ReadBoolBytes(b []byte) (bool, []byte, error) { + if len(b) < 1 { + return false, b, ErrShortBytes + } + switch b[0] { + case mtrue: + return true, b[1:], nil + case mfalse: + return false, b[1:], nil + default: + return false, b, badPrefix(BoolType, b[0]) + } +} + +// ReadInt64Bytes tries to read an int64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError (not a int) +func ReadInt64Bytes(b []byte) (i int64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + i = int64(rfixint(lead)) + o = b[1:] + return + } + if isnfixint(lead) { + i = int64(rnfixint(lead)) + o = b[1:] + return + } + + switch lead { + case mint8: + if l < 2 { + err = ErrShortBytes + return + } + i = int64(getMint8(b)) + o = b[2:] + return + + case mint16: + if l < 3 { + err = ErrShortBytes + return + } + i = int64(getMint16(b)) + o = b[3:] + return + + case mint32: + if l < 5 { + err = ErrShortBytes + return + } + i = int64(getMint32(b)) + o = b[5:] + return + + case mint64: + if l < 9 { + err = ErrShortBytes + return + } + i = getMint64(b) + o = b[9:] + return + + default: + err = badPrefix(IntType, lead) + return + } +} + +// ReadInt32Bytes tries to read an int32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int32) +func ReadInt32Bytes(b []byte) (int32, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt32 || i < math.MinInt32 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 32} + } + return int32(i), o, err +} + +// ReadInt16Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int16) +func ReadInt16Bytes(b []byte) (int16, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt16 || i < math.MinInt16 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 16} + } + return int16(i), o, err +} + +// ReadInt8Bytes tries to read an int16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int8) +func ReadInt8Bytes(b []byte) (int8, []byte, error) { + i, o, err := ReadInt64Bytes(b) + if i > math.MaxInt8 || i < math.MinInt8 { + return 0, o, IntOverflow{Value: i, FailedBitsize: 8} + } + return int8(i), o, err +} + +// ReadIntBytes tries to read an int +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a int) +// - IntOverflow{} (value doesn't fit in int; 32-bit platforms only) +func ReadIntBytes(b []byte) (int, []byte, error) { + if smallint { + i, b, err := ReadInt32Bytes(b) + return int(i), b, err + } + i, b, err := ReadInt64Bytes(b) + return int(i), b, err +} + +// ReadUint64Bytes tries to read a uint64 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +func ReadUint64Bytes(b []byte) (u uint64, o []byte, err error) { + l := len(b) + if l < 1 { + return 0, nil, ErrShortBytes + } + + lead := b[0] + if isfixint(lead) { + u = uint64(rfixint(lead)) + o = b[1:] + return + } + + switch lead { + case muint8: + if l < 2 { + err = ErrShortBytes + return + } + u = uint64(getMuint8(b)) + o = b[2:] + return + + case muint16: + if l < 3 { + err = ErrShortBytes + return + } + u = uint64(getMuint16(b)) + o = b[3:] + return + + case muint32: + if l < 5 { + err = ErrShortBytes + return + } + u = uint64(getMuint32(b)) + o = b[5:] + return + + case muint64: + if l < 9 { + err = ErrShortBytes + return + } + u = getMuint64(b) + o = b[9:] + return + + default: + err = badPrefix(UintType, lead) + return + } +} + +// ReadUint32Bytes tries to read a uint32 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint32) +func ReadUint32Bytes(b []byte) (uint32, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint32 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 32} + } + return uint32(v), o, err +} + +// ReadUint16Bytes tries to read a uint16 +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint16) +func ReadUint16Bytes(b []byte) (uint16, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint16 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 16} + } + return uint16(v), o, err +} + +// ReadUint8Bytes tries to read a uint8 +// from 'b' and return the value and the remaining bytes. 
+// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint8) +func ReadUint8Bytes(b []byte) (uint8, []byte, error) { + v, o, err := ReadUint64Bytes(b) + if v > math.MaxUint8 { + return 0, nil, UintOverflow{Value: v, FailedBitsize: 8} + } + return uint8(v), o, err +} + +// ReadUintBytes tries to read a uint +// from 'b' and return the value and the remaining bytes. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a uint) +// - UintOverflow{} (value too large for uint; 32-bit platforms only) +func ReadUintBytes(b []byte) (uint, []byte, error) { + if smallint { + u, b, err := ReadUint32Bytes(b) + return uint(u), b, err + } + u, b, err := ReadUint64Bytes(b) + return uint(u), b, err +} + +// ReadByteBytes is analogous to ReadUint8Bytes +func ReadByteBytes(b []byte) (byte, []byte, error) { + return ReadUint8Bytes(b) +} + +// ReadBytesBytes reads a 'bin' object +// from 'b' and returns its vaue and +// the remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (too few bytes) +// - TypeError{} (not a 'bin' object) +func ReadBytesBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, scratch, false) +} + +func readBytesBytes(b []byte, scratch []byte, zc bool) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = int(b[1]) + b = b[2:] + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = badPrefix(BinType, lead) + return + } + + if len(b) < read { + err = ErrShortBytes + return + } + + // zero-copy + if zc { + v = b[0:read] + o = b[read:] + return + } + + if cap(scratch) >= read { + v = scratch[0:read] + } else { + v = make([]byte, read) + } + + o = b[copy(v, b):] + return +} + +// ReadBytesZC extracts the messagepack-encoded +// binary field without copying. The returned []byte +// points to the same memory as the input slice. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'bin') +func ReadBytesZC(b []byte) (v []byte, o []byte, err error) { + return readBytesBytes(b, nil, true) +} + +func ReadExactBytes(b []byte, into []byte) (o []byte, err error) { + l := len(b) + if l < 1 { + err = ErrShortBytes + return + } + + lead := b[0] + var read uint32 + var skip int + switch lead { + case mbin8: + if l < 2 { + err = ErrShortBytes + return + } + + read = uint32(b[1]) + skip = 2 + + case mbin16: + if l < 3 { + err = ErrShortBytes + return + } + read = uint32(big.Uint16(b[1:])) + skip = 3 + + case mbin32: + if l < 5 { + err = ErrShortBytes + return + } + read = uint32(big.Uint32(b[1:])) + skip = 5 + + default: + err = badPrefix(BinType, lead) + return + } + + if read != uint32(len(into)) { + err = ArrayError{Wanted: uint32(len(into)), Got: read} + return + } + + o = b[skip+copy(into, b[skip:]):] + return +} + +// ReadStringZC reads a messagepack string field +// without copying. The returned []byte points +// to the same memory as the input slice. 
+// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (object not 'str') +func ReadStringZC(b []byte) (v []byte, o []byte, err error) { + l := len(b) + if l < 1 { + return nil, nil, ErrShortBytes + } + + lead := b[0] + var read int + + if isfixstr(lead) { + read = int(rfixstr(lead)) + b = b[1:] + } else { + switch lead { + case mstr8: + if l < 2 { + err = ErrShortBytes + return + } + read = int(b[1]) + b = b[2:] + + case mstr16: + if l < 3 { + err = ErrShortBytes + return + } + read = int(big.Uint16(b[1:])) + b = b[3:] + + case mstr32: + if l < 5 { + err = ErrShortBytes + return + } + read = int(big.Uint32(b[1:])) + b = b[5:] + + default: + err = TypeError{Method: StrType, Encoded: getType(lead)} + return + } + } + + if len(b) < read { + err = ErrShortBytes + return + } + + v = b[0:read] + o = b[read:] + return +} + +// ReadStringBytes reads a 'str' object +// from 'b' and returns its value and the +// remaining bytes in 'b'. +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError +func ReadStringBytes(b []byte) (string, []byte, error) { + v, o, err := ReadStringZC(b) + return string(v), o, err +} + +// ReadStringAsBytes reads a 'str' object +// into a slice of bytes. 'v' is the value of +// the 'str' object, which may reside in memory +// pointed to by 'scratch.' 'o' is the remaining bytes +// in 'b.'' +// Possible errors: +// - ErrShortBytes (b not long enough) +// - TypeError{} (not 'str' type) +// - InvalidPrefixError (unknown type marker) +func ReadStringAsBytes(b []byte, scratch []byte) (v []byte, o []byte, err error) { + var tmp []byte + tmp, o, err = ReadStringZC(b) + v = append(scratch[:0], tmp...) + return +} + +// ReadComplex128Bytes reads a complex128 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex128) +// - InvalidPrefixError +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex128) +func ReadComplex128Bytes(b []byte) (c complex128, o []byte, err error) { + if len(b) < 18 { + err = ErrShortBytes + return + } + if b[0] != mfixext16 { + err = badPrefix(Complex128Type, b[0]) + return + } + if int8(b[1]) != Complex128Extension { + err = errExt(int8(b[1]), Complex128Extension) + return + } + c = complex(math.Float64frombits(big.Uint64(b[2:])), + math.Float64frombits(big.Uint64(b[10:]))) + o = b[18:] + return +} + +// ReadComplex64Bytes reads a complex64 +// extension object from 'b' and returns the +// remaining bytes. +// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a complex64) +func ReadComplex64Bytes(b []byte) (c complex64, o []byte, err error) { + if len(b) < 10 { + err = ErrShortBytes + return + } + if b[0] != mfixext8 { + err = badPrefix(Complex64Type, b[0]) + return + } + if b[1] != Complex64Extension { + err = errExt(int8(b[1]), Complex64Extension) + return + } + c = complex(math.Float32frombits(big.Uint32(b[2:])), + math.Float32frombits(big.Uint32(b[6:]))) + o = b[10:] + return +} + +// ReadTimeBytes reads a time.Time +// extension object from 'b' and returns the +// remaining bytes. 
+// Possible errors: +// - ErrShortBytes (not enough bytes in 'b') +// - TypeError{} (object not a complex64) +// - ExtensionTypeError{} (object an extension of the correct size, but not a time.Time) +func ReadTimeBytes(b []byte) (t time.Time, o []byte, err error) { + if len(b) < 15 { + err = ErrShortBytes + return + } + if b[0] != mext8 || b[1] != 12 { + err = badPrefix(TimeType, b[0]) + return + } + if int8(b[2]) != TimeExtension { + err = errExt(int8(b[2]), TimeExtension) + return + } + sec, nsec := getUnix(b[3:]) + t = time.Unix(sec, int64(nsec)).Local() + o = b[15:] + return +} + +// ReadMapStrIntfBytes reads a map[string]interface{} +// out of 'b' and returns the map and remaining bytes. +// If 'old' is non-nil, the values will be read into that map. +func ReadMapStrIntfBytes(b []byte, old map[string]interface{}) (v map[string]interface{}, o []byte, err error) { + var sz uint32 + o = b + sz, o, err = ReadMapHeaderBytes(o) + + if err != nil { + return + } + + if old != nil { + for key := range old { + delete(old, key) + } + v = old + } else { + v = make(map[string]interface{}, int(sz)) + } + + for z := uint32(0); z < sz; z++ { + if len(o) < 1 { + err = ErrShortBytes + return + } + var key []byte + key, o, err = ReadMapKeyZC(o) + if err != nil { + return + } + var val interface{} + val, o, err = ReadIntfBytes(o) + if err != nil { + return + } + v[string(key)] = val + } + return +} + +// ReadIntfBytes attempts to read +// the next object out of 'b' as a raw interface{} and +// return the remaining bytes. +func ReadIntfBytes(b []byte) (i interface{}, o []byte, err error) { + if len(b) < 1 { + err = ErrShortBytes + return + } + + k := NextType(b) + + switch k { + case MapType: + i, o, err = ReadMapStrIntfBytes(b, nil) + return + + case ArrayType: + var sz uint32 + sz, o, err = ReadArrayHeaderBytes(b) + if err != nil { + return + } + j := make([]interface{}, int(sz)) + i = j + for d := range j { + j[d], o, err = ReadIntfBytes(o) + if err != nil { + return + } + } + return + + case Float32Type: + i, o, err = ReadFloat32Bytes(b) + return + + case Float64Type: + i, o, err = ReadFloat64Bytes(b) + return + + case IntType: + i, o, err = ReadInt64Bytes(b) + return + + case UintType: + i, o, err = ReadUint64Bytes(b) + return + + case BoolType: + i, o, err = ReadBoolBytes(b) + return + + case TimeType: + i, o, err = ReadTimeBytes(b) + return + + case Complex64Type: + i, o, err = ReadComplex64Bytes(b) + return + + case Complex128Type: + i, o, err = ReadComplex128Bytes(b) + return + + case ExtensionType: + var t int8 + t, err = peekExtension(b) + if err != nil { + return + } + // use a user-defined extension, + // if it's been registered + f, ok := extensionReg[t] + if ok { + e := f() + o, err = ReadExtensionBytes(b, e) + i = e + return + } + // last resort is a raw extension + e := RawExtension{} + e.Type = int8(t) + o, err = ReadExtensionBytes(b, &e) + i = &e + return + + case NilType: + o, err = ReadNilBytes(b) + return + + case BinType: + i, o, err = ReadBytesBytes(b, nil) + return + + case StrType: + i, o, err = ReadStringBytes(b) + return + + default: + err = InvalidPrefixError(b[0]) + return + } +} + +// Skip skips the next object in 'b' and +// returns the remaining bytes. If the object +// is a map or array, all of its elements +// will be skipped. 
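The bytes-oriented readers above all share one calling convention: each returns the decoded value together with whatever is left of the input slice, so a raw buffer can be walked field by field. A minimal round-trip sketch using only functions from this package (the values are illustrative):

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// Encode a string and an int back to back, then decode them again.
	buf := msgp.AppendString(nil, "id")
	buf = msgp.AppendInt64(buf, 42)

	s, rest, err := msgp.ReadStringBytes(buf)
	if err != nil {
		panic(err)
	}
	i, rest, err := msgp.ReadInt64Bytes(rest)
	if err != nil {
		panic(err)
	}
	fmt.Println(s, i, len(rest)) // id 42 0
}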
+// Possible Errors: +// - ErrShortBytes (not enough bytes in b) +// - InvalidPrefixError (bad encoding) +func Skip(b []byte) ([]byte, error) { + sz, asz, err := getSize(b) + if err != nil { + return b, err + } + if uintptr(len(b)) < sz { + return b, ErrShortBytes + } + b = b[sz:] + for asz > 0 { + b, err = Skip(b) + if err != nil { + return b, err + } + asz-- + } + return b, nil +} + +// returns (skip N bytes, skip M objects, error) +func getSize(b []byte) (uintptr, uintptr, error) { + l := len(b) + if l == 0 { + return 0, 0, ErrShortBytes + } + lead := b[0] + spec := &sizes[lead] // get type information + size, mode := spec.size, spec.extra + if size == 0 { + return 0, 0, InvalidPrefixError(lead) + } + if mode >= 0 { // fixed composites + return uintptr(size), uintptr(mode), nil + } + if l < int(size) { + return 0, 0, ErrShortBytes + } + switch mode { + case extra8: + return uintptr(size) + uintptr(b[1]), 0, nil + case extra16: + return uintptr(size) + uintptr(big.Uint16(b[1:])), 0, nil + case extra32: + return uintptr(size) + uintptr(big.Uint32(b[1:])), 0, nil + case map16v: + return uintptr(size), 2 * uintptr(big.Uint16(b[1:])), nil + case map32v: + return uintptr(size), 2 * uintptr(big.Uint32(b[1:])), nil + case array16v: + return uintptr(size), uintptr(big.Uint16(b[1:])), nil + case array32v: + return uintptr(size), uintptr(big.Uint32(b[1:])), nil + default: + return 0, 0, fatal + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/size.go b/vendor/github.com/tinylib/msgp/msgp/size.go new file mode 100644 index 0000000000..ce2f8b16ff --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/size.go @@ -0,0 +1,38 @@ +package msgp + +// The sizes provided +// are the worst-case +// encoded sizes for +// each type. For variable- +// length types ([]byte, string), +// the total encoded size is +// the prefix size plus the +// length of the object. +const ( + Int64Size = 9 + IntSize = Int64Size + UintSize = Int64Size + Int8Size = 2 + Int16Size = 3 + Int32Size = 5 + Uint8Size = 2 + ByteSize = Uint8Size + Uint16Size = 3 + Uint32Size = 5 + Uint64Size = Int64Size + Float64Size = 9 + Float32Size = 5 + Complex64Size = 10 + Complex128Size = 18 + + TimeSize = 15 + BoolSize = 1 + NilSize = 1 + + MapHeaderSize = 5 + ArrayHeaderSize = 5 + + BytesPrefixSize = 5 + StringPrefixSize = 5 + ExtensionPrefixSize = 6 +) diff --git a/vendor/github.com/tinylib/msgp/msgp/unsafe.go b/vendor/github.com/tinylib/msgp/msgp/unsafe.go new file mode 100644 index 0000000000..0cb972e3be --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/unsafe.go @@ -0,0 +1,40 @@ +// +build !appengine + +package msgp + +import ( + "reflect" + "unsafe" +) + +// NOTE: +// all of the definition in this file +// should be repeated in appengine.go, +// but without using unsafe + +const ( + // spec says int and uint are always + // the same size, but that int/uint + // size may not be machine word size + smallint = unsafe.Sizeof(int(0)) == 4 +) + +// UnsafeString returns the byte slice as a volatile string +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. +func UnsafeString(b []byte) string { + return *(*string)(unsafe.Pointer(&reflect.StringHeader{Data: uintptr(unsafe.Pointer(&b[0])), Len: len(b)})) +} + +// UnsafeBytes returns the string as a byte slice +// THIS SHOULD ONLY BE USED BY THE CODE GENERATOR. +// THIS IS EVIL CODE. +// YOU HAVE BEEN WARNED. 
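Both unsafe helpers alias the caller's memory instead of copying it, which is what the warnings above are about. A short sketch of the hazard, assuming the package is imported as vendored here:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	b := []byte("hello")
	s := msgp.UnsafeString(b) // s points at b's backing array; nothing is copied

	b[0] = 'H'     // mutating b silently mutates the "immutable" string
	fmt.Println(s) // prints "Hello"
}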
+func UnsafeBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer(&reflect.SliceHeader{ + Len: len(s), + Cap: len(s), + Data: (*(*reflect.StringHeader)(unsafe.Pointer(&s))).Data, + })) +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write.go b/vendor/github.com/tinylib/msgp/msgp/write.go new file mode 100644 index 0000000000..0245c1bd79 --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write.go @@ -0,0 +1,845 @@ +package msgp + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "sync" + "time" +) + +// Sizer is an interface implemented +// by types that can estimate their +// size when MessagePack encoded. +// This interface is optional, but +// encoding/marshaling implementations +// may use this as a way to pre-allocate +// memory for serialization. +type Sizer interface { + Msgsize() int +} + +var ( + // Nowhere is an io.Writer to nowhere + Nowhere io.Writer = nwhere{} + + btsType = reflect.TypeOf(([]byte)(nil)) + writerPool = sync.Pool{ + New: func() interface{} { + return &Writer{buf: make([]byte, 2048)} + }, + } +) + +func popWriter(w io.Writer) *Writer { + wr := writerPool.Get().(*Writer) + wr.Reset(w) + return wr +} + +func pushWriter(wr *Writer) { + wr.w = nil + wr.wloc = 0 + writerPool.Put(wr) +} + +// freeW frees a writer for use +// by other processes. It is not necessary +// to call freeW on a writer. However, maintaining +// a reference to a *Writer after calling freeW on +// it will cause undefined behavior. +func freeW(w *Writer) { pushWriter(w) } + +// Require ensures that cap(old)-len(old) >= extra. +func Require(old []byte, extra int) []byte { + l := len(old) + c := cap(old) + r := l + extra + if c >= r { + return old + } else if l == 0 { + return make([]byte, 0, extra) + } + // the new size is the greater + // of double the old capacity + // and the sum of the old length + // and the number of new bytes + // necessary. + c <<= 1 + if c < r { + c = r + } + n := make([]byte, l, c) + copy(n, old) + return n +} + +// nowhere writer +type nwhere struct{} + +func (n nwhere) Write(p []byte) (int, error) { return len(p), nil } + +// Marshaler is the interface implemented +// by types that know how to marshal themselves +// as MessagePack. MarshalMsg appends the marshalled +// form of the object to the provided +// byte slice, returning the extended +// slice and any errors encountered. +type Marshaler interface { + MarshalMsg([]byte) ([]byte, error) +} + +// Encodable is the interface implemented +// by types that know how to write themselves +// as MessagePack using a *msgp.Writer. +type Encodable interface { + EncodeMsg(*Writer) error +} + +// Writer is a buffered writer +// that can be used to write +// MessagePack objects to an io.Writer. +// You must call *Writer.Flush() in order +// to flush all of the buffered data +// to the underlying writer. +type Writer struct { + w io.Writer + buf []byte + wloc int +} + +// NewWriter returns a new *Writer. +func NewWriter(w io.Writer) *Writer { + if wr, ok := w.(*Writer); ok { + return wr + } + return popWriter(w) +} + +// NewWriterSize returns a writer with a custom buffer size. +func NewWriterSize(w io.Writer, sz int) *Writer { + // we must be able to require() 18 + // contiguous bytes, so that is the + // practical minimum buffer size + if sz < 18 { + sz = 18 + } + + return &Writer{ + w: w, + buf: make([]byte, sz), + } +} + +// Encode encodes an Encodable to an io.Writer. 
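Encode is the usual entry point for stream encoding: it wraps (or reuses) a *Writer, runs the value's EncodeMsg, and flushes before returning. A minimal sketch with a hypothetical Point type standing in for generated code:

package main

import (
	"bytes"
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

// Point is a hypothetical type; in practice the msgp generator emits
// this kind of EncodeMsg method.
type Point struct{ X, Y int64 }

func (p *Point) EncodeMsg(w *msgp.Writer) error {
	if err := w.WriteArrayHeader(2); err != nil {
		return err
	}
	if err := w.WriteInt64(p.X); err != nil {
		return err
	}
	return w.WriteInt64(p.Y)
}

func main() {
	var buf bytes.Buffer
	if err := msgp.Encode(&buf, &Point{X: 3, Y: 4}); err != nil {
		panic(err)
	}
	fmt.Println(buf.Len(), "bytes") // a fixarray header plus two fixints: 3 bytes
}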
+func Encode(w io.Writer, e Encodable) error { + wr := NewWriter(w) + err := e.EncodeMsg(wr) + if err == nil { + err = wr.Flush() + } + freeW(wr) + return err +} + +func (mw *Writer) flush() error { + if mw.wloc == 0 { + return nil + } + n, err := mw.w.Write(mw.buf[:mw.wloc]) + if err != nil { + if n > 0 { + mw.wloc = copy(mw.buf, mw.buf[n:mw.wloc]) + } + return err + } + mw.wloc = 0 + return nil +} + +// Flush flushes all of the buffered +// data to the underlying writer. +func (mw *Writer) Flush() error { return mw.flush() } + +// Buffered returns the number bytes in the write buffer +func (mw *Writer) Buffered() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) avail() int { return len(mw.buf) - mw.wloc } + +func (mw *Writer) bufsize() int { return len(mw.buf) } + +// NOTE: this should only be called with +// a number that is guaranteed to be less than +// len(mw.buf). typically, it is called with a constant. +// +// NOTE: this is a hot code path +func (mw *Writer) require(n int) (int, error) { + c := len(mw.buf) + wl := mw.wloc + if c-wl < n { + if err := mw.flush(); err != nil { + return 0, err + } + wl = mw.wloc + } + mw.wloc += n + return wl, nil +} + +func (mw *Writer) Append(b ...byte) error { + if mw.avail() < len(b) { + err := mw.flush() + if err != nil { + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], b) + return nil +} + +// push one byte onto the buffer +// +// NOTE: this is a hot code path +func (mw *Writer) push(b byte) error { + if mw.wloc == len(mw.buf) { + if err := mw.flush(); err != nil { + return err + } + } + mw.buf[mw.wloc] = b + mw.wloc++ + return nil +} + +func (mw *Writer) prefix8(b byte, u uint8) error { + const need = 2 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu8(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix16(b byte, u uint16) error { + const need = 3 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu16(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix32(b byte, u uint32) error { + const need = 5 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu32(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +func (mw *Writer) prefix64(b byte, u uint64) error { + const need = 9 + if len(mw.buf)-mw.wloc < need { + if err := mw.flush(); err != nil { + return err + } + } + prefixu64(mw.buf[mw.wloc:], b, u) + mw.wloc += need + return nil +} + +// Write implements io.Writer, and writes +// data directly to the buffer. 
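A *Writer can also be driven by hand: write a map or array header announcing the element count, then exactly that many keys and values, and call Flush at the end, since nothing reaches the underlying writer until the buffer is flushed. A sketch (field names arbitrary, error checks elided for brevity):

package main

import (
	"bytes"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	var buf bytes.Buffer
	w := msgp.NewWriter(&buf)

	// error checks elided for brevity
	w.WriteMapHeader(2) // two key/value pairs follow
	w.WriteString("name")
	w.WriteString("example")
	w.WriteString("count")
	w.WriteInt(7)

	if err := w.Flush(); err != nil { // nothing reaches buf until Flush
		panic(err)
	}
}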
+func (mw *Writer) Write(p []byte) (int, error) { + l := len(p) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return 0, err + } + if l > len(mw.buf) { + return mw.w.Write(p) + } + } + mw.wloc += copy(mw.buf[mw.wloc:], p) + return l, nil +} + +// implements io.WriteString +func (mw *Writer) writeString(s string) error { + l := len(s) + if mw.avail() < l { + if err := mw.flush(); err != nil { + return err + } + if l > len(mw.buf) { + _, err := io.WriteString(mw.w, s) + return err + } + } + mw.wloc += copy(mw.buf[mw.wloc:], s) + return nil +} + +// Reset changes the underlying writer used by the Writer +func (mw *Writer) Reset(w io.Writer) { + mw.buf = mw.buf[:cap(mw.buf)] + mw.w = w + mw.wloc = 0 +} + +// WriteMapHeader writes a map header of the given +// size to the writer +func (mw *Writer) WriteMapHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixmap(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(mmap16, uint16(sz)) + default: + return mw.prefix32(mmap32, sz) + } +} + +// WriteArrayHeader writes an array header of the +// given size to the writer +func (mw *Writer) WriteArrayHeader(sz uint32) error { + switch { + case sz <= 15: + return mw.push(wfixarray(uint8(sz))) + case sz <= math.MaxUint16: + return mw.prefix16(marray16, uint16(sz)) + default: + return mw.prefix32(marray32, sz) + } +} + +// WriteNil writes a nil byte to the buffer +func (mw *Writer) WriteNil() error { + return mw.push(mnil) +} + +// WriteFloat64 writes a float64 to the writer +func (mw *Writer) WriteFloat64(f float64) error { + return mw.prefix64(mfloat64, math.Float64bits(f)) +} + +// WriteFloat32 writes a float32 to the writer +func (mw *Writer) WriteFloat32(f float32) error { + return mw.prefix32(mfloat32, math.Float32bits(f)) +} + +// WriteInt64 writes an int64 to the writer +func (mw *Writer) WriteInt64(i int64) error { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return mw.push(wfixint(uint8(i))) + case i <= math.MaxInt16: + return mw.prefix16(mint16, uint16(i)) + case i <= math.MaxInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } + } + switch { + case i >= -32: + return mw.push(wnfixint(int8(i))) + case i >= math.MinInt8: + return mw.prefix8(mint8, uint8(i)) + case i >= math.MinInt16: + return mw.prefix16(mint16, uint16(i)) + case i >= math.MinInt32: + return mw.prefix32(mint32, uint32(i)) + default: + return mw.prefix64(mint64, uint64(i)) + } +} + +// WriteInt8 writes an int8 to the writer +func (mw *Writer) WriteInt8(i int8) error { return mw.WriteInt64(int64(i)) } + +// WriteInt16 writes an int16 to the writer +func (mw *Writer) WriteInt16(i int16) error { return mw.WriteInt64(int64(i)) } + +// WriteInt32 writes an int32 to the writer +func (mw *Writer) WriteInt32(i int32) error { return mw.WriteInt64(int64(i)) } + +// WriteInt writes an int to the writer +func (mw *Writer) WriteInt(i int) error { return mw.WriteInt64(int64(i)) } + +// WriteUint64 writes a uint64 to the writer +func (mw *Writer) WriteUint64(u uint64) error { + switch { + case u <= (1<<7)-1: + return mw.push(wfixint(uint8(u))) + case u <= math.MaxUint8: + return mw.prefix8(muint8, uint8(u)) + case u <= math.MaxUint16: + return mw.prefix16(muint16, uint16(u)) + case u <= math.MaxUint32: + return mw.prefix32(muint32, uint32(u)) + default: + return mw.prefix64(muint64, u) + } +} + +// WriteByte is analogous to WriteUint8 +func (mw *Writer) WriteByte(u byte) error { return mw.WriteUint8(uint8(u)) } + +// WriteUint8 writes a uint8 to 
the writer +func (mw *Writer) WriteUint8(u uint8) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint16 writes a uint16 to the writer +func (mw *Writer) WriteUint16(u uint16) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint32 writes a uint32 to the writer +func (mw *Writer) WriteUint32(u uint32) error { return mw.WriteUint64(uint64(u)) } + +// WriteUint writes a uint to the writer +func (mw *Writer) WriteUint(u uint) error { return mw.WriteUint64(uint64(u)) } + +// WriteBytes writes binary as 'bin' to the writer +func (mw *Writer) WriteBytes(b []byte) error { + sz := uint32(len(b)) + var err error + switch { + case sz <= math.MaxUint8: + err = mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mbin16, uint16(sz)) + default: + err = mw.prefix32(mbin32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(b) + return err +} + +// WriteBytesHeader writes just the size header +// of a MessagePack 'bin' object. The user is responsible +// for then writing 'sz' more bytes into the stream. +func (mw *Writer) WriteBytesHeader(sz uint32) error { + switch { + case sz <= math.MaxUint8: + return mw.prefix8(mbin8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mbin16, uint16(sz)) + default: + return mw.prefix32(mbin32, sz) + } +} + +// WriteBool writes a bool to the writer +func (mw *Writer) WriteBool(b bool) error { + if b { + return mw.push(mtrue) + } + return mw.push(mfalse) +} + +// WriteString writes a messagepack string to the writer. +// (This is NOT an implementation of io.StringWriter) +func (mw *Writer) WriteString(s string) error { + sz := uint32(len(s)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + return mw.writeString(s) +} + +// WriteStringHeader writes just the string size +// header of a MessagePack 'str' object. The user +// is responsible for writing 'sz' more valid UTF-8 +// bytes to the stream. +func (mw *Writer) WriteStringHeader(sz uint32) error { + switch { + case sz <= 31: + return mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + return mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + return mw.prefix16(mstr16, uint16(sz)) + default: + return mw.prefix32(mstr32, sz) + } +} + +// WriteStringFromBytes writes a 'str' object +// from a []byte. 
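WriteBytesHeader and WriteStringHeader let large payloads be streamed without first materialising them as a single []byte: emit the size prefix, then write exactly that many raw bytes, relying on the fact that *Writer itself implements io.Writer. A sketch; writeBlob, src, and sz are assumptions:

package example

import (
	"io"

	"github.com/tinylib/msgp/msgp"
)

// writeBlob is a hypothetical helper: it emits a MessagePack 'bin'
// object of exactly sz bytes, streamed straight from src.
func writeBlob(w *msgp.Writer, src io.Reader, sz int64) error {
	if err := w.WriteBytesHeader(uint32(sz)); err != nil {
		return err
	}
	// *Writer implements io.Writer, so the raw payload can be copied
	// in right after the size prefix.
	if _, err := io.CopyN(w, src, sz); err != nil {
		return err
	}
	return w.Flush()
}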
+func (mw *Writer) WriteStringFromBytes(str []byte) error { + sz := uint32(len(str)) + var err error + switch { + case sz <= 31: + err = mw.push(wfixstr(uint8(sz))) + case sz <= math.MaxUint8: + err = mw.prefix8(mstr8, uint8(sz)) + case sz <= math.MaxUint16: + err = mw.prefix16(mstr16, uint16(sz)) + default: + err = mw.prefix32(mstr32, sz) + } + if err != nil { + return err + } + _, err = mw.Write(str) + return err +} + +// WriteComplex64 writes a complex64 to the writer +func (mw *Writer) WriteComplex64(f complex64) error { + o, err := mw.require(10) + if err != nil { + return err + } + mw.buf[o] = mfixext8 + mw.buf[o+1] = Complex64Extension + big.PutUint32(mw.buf[o+2:], math.Float32bits(real(f))) + big.PutUint32(mw.buf[o+6:], math.Float32bits(imag(f))) + return nil +} + +// WriteComplex128 writes a complex128 to the writer +func (mw *Writer) WriteComplex128(f complex128) error { + o, err := mw.require(18) + if err != nil { + return err + } + mw.buf[o] = mfixext16 + mw.buf[o+1] = Complex128Extension + big.PutUint64(mw.buf[o+2:], math.Float64bits(real(f))) + big.PutUint64(mw.buf[o+10:], math.Float64bits(imag(f))) + return nil +} + +// WriteMapStrStr writes a map[string]string to the writer +func (mw *Writer) WriteMapStrStr(mp map[string]string) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteString(val) + if err != nil { + return + } + } + return nil +} + +// WriteMapStrIntf writes a map[string]interface to the writer +func (mw *Writer) WriteMapStrIntf(mp map[string]interface{}) (err error) { + err = mw.WriteMapHeader(uint32(len(mp))) + if err != nil { + return + } + for key, val := range mp { + err = mw.WriteString(key) + if err != nil { + return + } + err = mw.WriteIntf(val) + if err != nil { + return + } + } + return +} + +// WriteTime writes a time.Time object to the wire. +// +// Time is encoded as Unix time, which means that +// location (time zone) data is removed from the object. +// The encoded object itself is 12 bytes: 8 bytes for +// a big-endian 64-bit integer denoting seconds +// elapsed since "zero" Unix time, followed by 4 bytes +// for a big-endian 32-bit signed integer denoting +// the nanosecond offset of the time. This encoding +// is intended to ease portability across languages. +// (Note that this is *not* the standard time.Time +// binary encoding, because its implementation relies +// heavily on the internal representation used by the +// time package.) +func (mw *Writer) WriteTime(t time.Time) error { + t = t.UTC() + o, err := mw.require(15) + if err != nil { + return err + } + mw.buf[o] = mext8 + mw.buf[o+1] = 12 + mw.buf[o+2] = TimeExtension + putUnix(mw.buf[o+3:], t.Unix(), int32(t.Nanosecond())) + return nil +} + +// WriteIntf writes the concrete type of 'v'. 
+// WriteIntf will error if 'v' is not one of the following: +// - A bool, float, string, []byte, int, uint, or complex +// - A map of supported types (with string keys) +// - An array or slice of supported types +// - A pointer to a supported type +// - A type that satisfies the msgp.Encodable interface +// - A type that satisfies the msgp.Extension interface +func (mw *Writer) WriteIntf(v interface{}) error { + if v == nil { + return mw.WriteNil() + } + switch v := v.(type) { + + // preferred interfaces + + case Encodable: + return v.EncodeMsg(mw) + case Extension: + return mw.WriteExtension(v) + + // concrete types + + case bool: + return mw.WriteBool(v) + case float32: + return mw.WriteFloat32(v) + case float64: + return mw.WriteFloat64(v) + case complex64: + return mw.WriteComplex64(v) + case complex128: + return mw.WriteComplex128(v) + case uint8: + return mw.WriteUint8(v) + case uint16: + return mw.WriteUint16(v) + case uint32: + return mw.WriteUint32(v) + case uint64: + return mw.WriteUint64(v) + case uint: + return mw.WriteUint(v) + case int8: + return mw.WriteInt8(v) + case int16: + return mw.WriteInt16(v) + case int32: + return mw.WriteInt32(v) + case int64: + return mw.WriteInt64(v) + case int: + return mw.WriteInt(v) + case string: + return mw.WriteString(v) + case []byte: + return mw.WriteBytes(v) + case map[string]string: + return mw.WriteMapStrStr(v) + case map[string]interface{}: + return mw.WriteMapStrIntf(v) + case time.Time: + return mw.WriteTime(v) + } + + val := reflect.ValueOf(v) + if !isSupported(val.Kind()) || !val.IsValid() { + return fmt.Errorf("msgp: type %s not supported", val) + } + + switch val.Kind() { + case reflect.Ptr: + if val.IsNil() { + return mw.WriteNil() + } + return mw.WriteIntf(val.Elem().Interface()) + case reflect.Slice: + return mw.writeSlice(val) + case reflect.Map: + return mw.writeMap(val) + } + return &ErrUnsupportedType{val.Type()} +} + +func (mw *Writer) writeMap(v reflect.Value) (err error) { + if v.Elem().Kind() != reflect.String { + return errors.New("msgp: map keys must be strings") + } + ks := v.MapKeys() + err = mw.WriteMapHeader(uint32(len(ks))) + if err != nil { + return + } + for _, key := range ks { + val := v.MapIndex(key) + err = mw.WriteString(key.String()) + if err != nil { + return + } + err = mw.WriteIntf(val.Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeSlice(v reflect.Value) (err error) { + // is []byte + if v.Type().ConvertibleTo(btsType) { + return mw.WriteBytes(v.Bytes()) + } + + sz := uint32(v.Len()) + err = mw.WriteArrayHeader(sz) + if err != nil { + return + } + for i := uint32(0); i < sz; i++ { + err = mw.WriteIntf(v.Index(int(i)).Interface()) + if err != nil { + return + } + } + return +} + +func (mw *Writer) writeStruct(v reflect.Value) error { + if enc, ok := v.Interface().(Encodable); ok { + return enc.EncodeMsg(mw) + } + return fmt.Errorf("msgp: unsupported type: %s", v.Type()) +} + +func (mw *Writer) writeVal(v reflect.Value) error { + if !isSupported(v.Kind()) { + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) + } + + // shortcut for nil values + if v.IsNil() { + return mw.WriteNil() + } + switch v.Kind() { + case reflect.Bool: + return mw.WriteBool(v.Bool()) + + case reflect.Float32, reflect.Float64: + return mw.WriteFloat64(v.Float()) + + case reflect.Complex64, reflect.Complex128: + return mw.WriteComplex128(v.Complex()) + + case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8: + return mw.WriteInt64(v.Int()) + + case 
reflect.Interface, reflect.Ptr: + if v.IsNil() { + mw.WriteNil() + } + return mw.writeVal(v.Elem()) + + case reflect.Map: + return mw.writeMap(v) + + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint8: + return mw.WriteUint64(v.Uint()) + + case reflect.String: + return mw.WriteString(v.String()) + + case reflect.Slice, reflect.Array: + return mw.writeSlice(v) + + case reflect.Struct: + return mw.writeStruct(v) + + } + return fmt.Errorf("msgp: msgp/enc: type %q not supported", v.Type()) +} + +// is the reflect.Kind encodable? +func isSupported(k reflect.Kind) bool { + switch k { + case reflect.Func, reflect.Chan, reflect.Invalid, reflect.UnsafePointer: + return false + default: + return true + } +} + +// GuessSize guesses the size of the underlying +// value of 'i'. If the underlying value is not +// a simple builtin (or []byte), GuessSize defaults +// to 512. +func GuessSize(i interface{}) int { + if i == nil { + return NilSize + } + + switch i := i.(type) { + case Sizer: + return i.Msgsize() + case Extension: + return ExtensionPrefixSize + i.Len() + case float64: + return Float64Size + case float32: + return Float32Size + case uint8, uint16, uint32, uint64, uint: + return UintSize + case int8, int16, int32, int64, int: + return IntSize + case []byte: + return BytesPrefixSize + len(i) + case string: + return StringPrefixSize + len(i) + case complex64: + return Complex64Size + case complex128: + return Complex128Size + case bool: + return BoolSize + case map[string]interface{}: + s := MapHeaderSize + for key, val := range i { + s += StringPrefixSize + len(key) + GuessSize(val) + } + return s + case map[string]string: + s := MapHeaderSize + for key, val := range i { + s += 2*StringPrefixSize + len(key) + len(val) + } + return s + default: + return 512 + } +} diff --git a/vendor/github.com/tinylib/msgp/msgp/write_bytes.go b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go new file mode 100644 index 0000000000..eaa03c46eb --- /dev/null +++ b/vendor/github.com/tinylib/msgp/msgp/write_bytes.go @@ -0,0 +1,411 @@ +package msgp + +import ( + "math" + "reflect" + "time" +) + +// ensure 'sz' extra bytes in 'b' btw len(b) and cap(b) +func ensure(b []byte, sz int) ([]byte, int) { + l := len(b) + c := cap(b) + if c-l < sz { + o := make([]byte, (2*c)+sz) // exponential growth + n := copy(o, b) + return o[:n+sz], n + } + return b[:l+sz], l +} + +// AppendMapHeader appends a map header with the +// given size to the slice +func AppendMapHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixmap(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], mmap16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], mmap32, sz) + return o + } +} + +// AppendArrayHeader appends an array header with +// the given size to the slice +func AppendArrayHeader(b []byte, sz uint32) []byte { + switch { + case sz <= 15: + return append(b, wfixarray(uint8(sz))) + + case sz <= math.MaxUint16: + o, n := ensure(b, 3) + prefixu16(o[n:], marray16, uint16(sz)) + return o + + default: + o, n := ensure(b, 5) + prefixu32(o[n:], marray32, sz) + return o + } +} + +// AppendNil appends a 'nil' byte to the slice +func AppendNil(b []byte) []byte { return append(b, mnil) } + +// AppendFloat64 appends a float64 to the slice +func AppendFloat64(b []byte, f float64) []byte { + o, n := ensure(b, Float64Size) + prefixu64(o[n:], mfloat64, math.Float64bits(f)) + return o +} + +// AppendFloat32 appends a float32 to the slice +func 
AppendFloat32(b []byte, f float32) []byte { + o, n := ensure(b, Float32Size) + prefixu32(o[n:], mfloat32, math.Float32bits(f)) + return o +} + +// AppendInt64 appends an int64 to the slice +func AppendInt64(b []byte, i int64) []byte { + if i >= 0 { + switch { + case i <= math.MaxInt8: + return append(b, wfixint(uint8(i))) + case i <= math.MaxInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i <= math.MaxInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } + } + switch { + case i >= -32: + return append(b, wnfixint(int8(i))) + case i >= math.MinInt8: + o, n := ensure(b, 2) + putMint8(o[n:], int8(i)) + return o + case i >= math.MinInt16: + o, n := ensure(b, 3) + putMint16(o[n:], int16(i)) + return o + case i >= math.MinInt32: + o, n := ensure(b, 5) + putMint32(o[n:], int32(i)) + return o + default: + o, n := ensure(b, 9) + putMint64(o[n:], i) + return o + } +} + +// AppendInt appends an int to the slice +func AppendInt(b []byte, i int) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt8 appends an int8 to the slice +func AppendInt8(b []byte, i int8) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt16 appends an int16 to the slice +func AppendInt16(b []byte, i int16) []byte { return AppendInt64(b, int64(i)) } + +// AppendInt32 appends an int32 to the slice +func AppendInt32(b []byte, i int32) []byte { return AppendInt64(b, int64(i)) } + +// AppendUint64 appends a uint64 to the slice +func AppendUint64(b []byte, u uint64) []byte { + switch { + case u <= (1<<7)-1: + return append(b, wfixint(uint8(u))) + + case u <= math.MaxUint8: + o, n := ensure(b, 2) + putMuint8(o[n:], uint8(u)) + return o + + case u <= math.MaxUint16: + o, n := ensure(b, 3) + putMuint16(o[n:], uint16(u)) + return o + + case u <= math.MaxUint32: + o, n := ensure(b, 5) + putMuint32(o[n:], uint32(u)) + return o + + default: + o, n := ensure(b, 9) + putMuint64(o[n:], u) + return o + + } +} + +// AppendUint appends a uint to the slice +func AppendUint(b []byte, u uint) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint8 appends a uint8 to the slice +func AppendUint8(b []byte, u uint8) []byte { return AppendUint64(b, uint64(u)) } + +// AppendByte is analogous to AppendUint8 +func AppendByte(b []byte, u byte) []byte { return AppendUint8(b, uint8(u)) } + +// AppendUint16 appends a uint16 to the slice +func AppendUint16(b []byte, u uint16) []byte { return AppendUint64(b, uint64(u)) } + +// AppendUint32 appends a uint32 to the slice +func AppendUint32(b []byte, u uint32) []byte { return AppendUint64(b, uint64(u)) } + +// AppendBytes appends bytes to the slice as MessagePack 'bin' data +func AppendBytes(b []byte, bts []byte) []byte { + sz := len(bts) + var o []byte + var n int + switch { + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mbin8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mbin16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mbin32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], bts)] +} + +// AppendBool appends a bool to the slice +func AppendBool(b []byte, t bool) []byte { + if t { + return append(b, mtrue) + } + return append(b, mfalse) +} + +// AppendString appends a string as a MessagePack 'str' to the slice +func AppendString(b []byte, s string) []byte { + sz := len(s) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = 
wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], s)] +} + +// AppendStringFromBytes appends a []byte +// as a MessagePack 'str' to the slice 'b.' +func AppendStringFromBytes(b []byte, str []byte) []byte { + sz := len(str) + var n int + var o []byte + switch { + case sz <= 31: + o, n = ensure(b, 1+sz) + o[n] = wfixstr(uint8(sz)) + n++ + case sz <= math.MaxUint8: + o, n = ensure(b, 2+sz) + prefixu8(o[n:], mstr8, uint8(sz)) + n += 2 + case sz <= math.MaxUint16: + o, n = ensure(b, 3+sz) + prefixu16(o[n:], mstr16, uint16(sz)) + n += 3 + default: + o, n = ensure(b, 5+sz) + prefixu32(o[n:], mstr32, uint32(sz)) + n += 5 + } + return o[:n+copy(o[n:], str)] +} + +// AppendComplex64 appends a complex64 to the slice as a MessagePack extension +func AppendComplex64(b []byte, c complex64) []byte { + o, n := ensure(b, Complex64Size) + o[n] = mfixext8 + o[n+1] = Complex64Extension + big.PutUint32(o[n+2:], math.Float32bits(real(c))) + big.PutUint32(o[n+6:], math.Float32bits(imag(c))) + return o +} + +// AppendComplex128 appends a complex128 to the slice as a MessagePack extension +func AppendComplex128(b []byte, c complex128) []byte { + o, n := ensure(b, Complex128Size) + o[n] = mfixext16 + o[n+1] = Complex128Extension + big.PutUint64(o[n+2:], math.Float64bits(real(c))) + big.PutUint64(o[n+10:], math.Float64bits(imag(c))) + return o +} + +// AppendTime appends a time.Time to the slice as a MessagePack extension +func AppendTime(b []byte, t time.Time) []byte { + o, n := ensure(b, TimeSize) + t = t.UTC() + o[n] = mext8 + o[n+1] = 12 + o[n+2] = TimeExtension + putUnix(o[n+3:], t.Unix(), int32(t.Nanosecond())) + return o +} + +// AppendMapStrStr appends a map[string]string to the slice +// as a MessagePack map with 'str'-type keys and values +func AppendMapStrStr(b []byte, m map[string]string) []byte { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + for key, val := range m { + b = AppendString(b, key) + b = AppendString(b, val) + } + return b +} + +// AppendMapStrIntf appends a map[string]interface{} to the slice +// as a MessagePack map with 'str'-type keys. +func AppendMapStrIntf(b []byte, m map[string]interface{}) ([]byte, error) { + sz := uint32(len(m)) + b = AppendMapHeader(b, sz) + var err error + for key, val := range m { + b = AppendString(b, key) + b, err = AppendIntf(b, val) + if err != nil { + return b, err + } + } + return b, nil +} + +// AppendIntf appends the concrete type of 'i' to the +// provided []byte. 
'i' must be one of the following: +// - 'nil' +// - A bool, float, string, []byte, int, uint, or complex +// - A map[string]interface{} or map[string]string +// - A []T, where T is another supported type +// - A *T, where T is another supported type +// - A type that satisfieds the msgp.Marshaler interface +// - A type that satisfies the msgp.Extension interface +func AppendIntf(b []byte, i interface{}) ([]byte, error) { + if i == nil { + return AppendNil(b), nil + } + + // all the concrete types + // for which we have methods + switch i := i.(type) { + case Marshaler: + return i.MarshalMsg(b) + case Extension: + return AppendExtension(b, i) + case bool: + return AppendBool(b, i), nil + case float32: + return AppendFloat32(b, i), nil + case float64: + return AppendFloat64(b, i), nil + case complex64: + return AppendComplex64(b, i), nil + case complex128: + return AppendComplex128(b, i), nil + case string: + return AppendString(b, i), nil + case []byte: + return AppendBytes(b, i), nil + case int8: + return AppendInt8(b, i), nil + case int16: + return AppendInt16(b, i), nil + case int32: + return AppendInt32(b, i), nil + case int64: + return AppendInt64(b, i), nil + case int: + return AppendInt64(b, int64(i)), nil + case uint: + return AppendUint64(b, uint64(i)), nil + case uint8: + return AppendUint8(b, i), nil + case uint16: + return AppendUint16(b, i), nil + case uint32: + return AppendUint32(b, i), nil + case uint64: + return AppendUint64(b, i), nil + case time.Time: + return AppendTime(b, i), nil + case map[string]interface{}: + return AppendMapStrIntf(b, i) + case map[string]string: + return AppendMapStrStr(b, i), nil + case []interface{}: + b = AppendArrayHeader(b, uint32(len(i))) + var err error + for _, k := range i { + b, err = AppendIntf(b, k) + if err != nil { + return b, err + } + } + return b, nil + } + + var err error + v := reflect.ValueOf(i) + switch v.Kind() { + case reflect.Array, reflect.Slice: + l := v.Len() + b = AppendArrayHeader(b, uint32(l)) + for i := 0; i < l; i++ { + b, err = AppendIntf(b, v.Index(i).Interface()) + if err != nil { + return b, err + } + } + return b, nil + case reflect.Ptr: + if v.IsNil() { + return AppendNil(b), err + } + b, err = AppendIntf(b, v.Elem().Interface()) + return b, err + default: + return b, &ErrUnsupportedType{T: v.Type()} + } +} diff --git a/vendor/github.com/weaveworks/common/LICENSE b/vendor/github.com/weaveworks/common/LICENSE new file mode 100644 index 0000000000..a710357f3b --- /dev/null +++ b/vendor/github.com/weaveworks/common/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2014-2016 Weaveworks Ltd. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/weaveworks/common/backoff/backoff.go b/vendor/github.com/weaveworks/common/backoff/backoff.go new file mode 100644 index 0000000000..94f5ad6fbf --- /dev/null +++ b/vendor/github.com/weaveworks/common/backoff/backoff.go @@ -0,0 +1,96 @@ +package backoff + +import ( + "time" + + log "github.com/Sirupsen/logrus" +) + +type backoff struct { + f func() (bool, error) + quit, done chan struct{} + msg string + initialBackoff, maxBackoff time.Duration +} + +// Interface does f in a loop, sleeping for initialBackoff between +// each iterations. If it hits an error, it exponentially backs +// off to maxBackoff. Backoff will log when it backs off, but +// will stop logging when it reaches maxBackoff. It will also +// log on first success. 
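Start runs f in a loop, sleeping initialBackoff between iterations and doubling the sleep (capped at maxBackoff) while f keeps returning errors. A usage sketch; tryConnect and the message string are hypothetical:

package example

import (
	"time"

	"github.com/weaveworks/common/backoff"
)

// connectWithRetry keeps calling tryConnect until it succeeds, backing
// off exponentially between failures.
func connectWithRetry(tryConnect func() error) backoff.Interface {
	b := backoff.New(func() (bool, error) {
		if err := tryConnect(); err != nil {
			return false, err // not done yet; back off and retry
		}
		return true, nil // done; Start() returns
	}, "connecting to backend")
	b.SetInitialBackoff(1 * time.Second)
	b.SetMaxBackoff(30 * time.Second)
	go b.Start() // Start blocks, so run it in its own goroutine
	return b     // callers can Stop() to cancel and wait for the loop to exit
}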
+type Interface interface { + Start() + Stop() + SetInitialBackoff(time.Duration) + SetMaxBackoff(time.Duration) +} + +// New makes a new Interface +func New(f func() (bool, error), msg string) Interface { + return &backoff{ + f: f, + quit: make(chan struct{}), + done: make(chan struct{}), + msg: msg, + initialBackoff: 10 * time.Second, + maxBackoff: 60 * time.Second, + } +} + +func (b *backoff) SetInitialBackoff(d time.Duration) { + b.initialBackoff = d +} + +func (b *backoff) SetMaxBackoff(d time.Duration) { + b.maxBackoff = d +} + +// Stop the backoff, and waits for it to stop. +func (b *backoff) Stop() { + close(b.quit) + <-b.done +} + +// Start the backoff. Can only be called once. +func (b *backoff) Start() { + defer close(b.done) + backoff := b.initialBackoff + shouldLog := true + + for { + done, err := b.f() + if done { + return + } + + if err != nil { + backoff *= 2 + if backoff > b.maxBackoff { + backoff = b.maxBackoff + } + } else if backoff > b.initialBackoff { + backoff = b.initialBackoff + shouldLog = true + } + + if shouldLog { + if err != nil { + log.Warnf("Error %s, backing off %s: %s", + b.msg, backoff, err) + } else { + log.Infof("Success %s", b.msg) + } + } + + if backoff >= b.maxBackoff || err == nil { + shouldLog = false + } + + select { + case <-time.After(backoff): + case <-b.quit: + return + } + } + +} diff --git a/vendor/github.com/weaveworks/common/exec/exec.go b/vendor/github.com/weaveworks/common/exec/exec.go new file mode 100644 index 0000000000..fc0b27ad1a --- /dev/null +++ b/vendor/github.com/weaveworks/common/exec/exec.go @@ -0,0 +1,30 @@ +package exec + +import ( + "io" + "os/exec" +) + +// Cmd is a hook for mocking +type Cmd interface { + StdoutPipe() (io.ReadCloser, error) + StderrPipe() (io.ReadCloser, error) + Start() error + Wait() error + Kill() error + Output() ([]byte, error) + Run() error +} + +// Command is a hook for mocking +var Command = func(name string, args ...string) Cmd { + return &realCmd{exec.Command(name, args...)} +} + +type realCmd struct { + *exec.Cmd +} + +func (c *realCmd) Kill() error { + return c.Cmd.Process.Kill() +} diff --git a/vendor/github.com/weaveworks/common/fs/fs.go b/vendor/github.com/weaveworks/common/fs/fs.go new file mode 100644 index 0000000000..6b9eb41629 --- /dev/null +++ b/vendor/github.com/weaveworks/common/fs/fs.go @@ -0,0 +1,94 @@ +package fs + +import ( + "io" + "io/ioutil" + "os" + "syscall" +) + +// Interface is the filesystem interface type. +type Interface interface { + ReadDir(string) ([]os.FileInfo, error) + ReadDirNames(string) ([]string, error) + ReadFile(string) ([]byte, error) + Lstat(string, *syscall.Stat_t) error + Stat(string, *syscall.Stat_t) error + Open(string) (io.ReadWriteCloser, error) +} + +type realFS struct{} + +// FS is the way you should access the filesystem. 
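Because the package-level filesystem value can be swapped out with the Mock and Restore helpers further down, code that goes through the trampoline functions (fs.ReadFile and friends) can be tested without touching the real filesystem. A sketch of that pattern; fakeFS and its stubbed contents are illustrative:

package example

import (
	"github.com/weaveworks/common/fs"
)

// fakeFS overrides only ReadFile; the embedded interface stays nil, so
// any other method panics, which is acceptable for a focused test stub.
type fakeFS struct {
	fs.Interface
}

func (fakeFS) ReadFile(path string) ([]byte, error) {
	return []byte("stubbed contents"), nil
}

func withFakeFS(run func()) {
	fs.Mock(fakeFS{})
	defer fs.Restore()
	run() // code under test calls fs.ReadFile and sees the stub
}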
+var fs Interface = realFS{} + +func (realFS) ReadDir(path string) ([]os.FileInfo, error) { + return ioutil.ReadDir(path) +} + +func (realFS) ReadDirNames(path string) ([]string, error) { + fh, err := os.Open(path) + if err != nil { + return nil, err + } + defer fh.Close() + return fh.Readdirnames(-1) +} + +func (realFS) ReadFile(path string) ([]byte, error) { + return ioutil.ReadFile(path) +} + +func (realFS) Lstat(path string, stat *syscall.Stat_t) error { + return syscall.Lstat(path, stat) +} + +func (realFS) Stat(path string, stat *syscall.Stat_t) error { + return syscall.Stat(path, stat) +} + +func (realFS) Open(path string) (io.ReadWriteCloser, error) { + return os.Open(path) +} + +// trampolines here to allow users to do fs.ReadDir etc + +// ReadDir see ioutil.ReadDir +func ReadDir(path string) ([]os.FileInfo, error) { + return fs.ReadDir(path) +} + +// ReadDirNames see os.File.ReadDirNames +func ReadDirNames(path string) ([]string, error) { + return fs.ReadDirNames(path) +} + +// ReadFile see ioutil.ReadFile +func ReadFile(path string) ([]byte, error) { + return fs.ReadFile(path) +} + +// Lstat see syscall.Lstat +func Lstat(path string, stat *syscall.Stat_t) error { + return fs.Lstat(path, stat) +} + +// Stat see syscall.Stat +func Stat(path string, stat *syscall.Stat_t) error { + return fs.Stat(path, stat) +} + +// Open see os.Open +func Open(path string) (io.ReadWriteCloser, error) { + return fs.Open(path) +} + +// Mock is used to switch out the filesystem for a mock. +func Mock(mock Interface) { + fs = mock +} + +// Restore puts back the real filesystem. +func Restore() { + fs = realFS{} +} diff --git a/vendor/github.com/weaveworks/common/instrument/instrument.go b/vendor/github.com/weaveworks/common/instrument/instrument.go new file mode 100644 index 0000000000..d90aa90d3b --- /dev/null +++ b/vendor/github.com/weaveworks/common/instrument/instrument.go @@ -0,0 +1,53 @@ +package instrument + +import ( + "time" + + "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/prometheus/client_golang/prometheus" + "golang.org/x/net/context" +) + +// ErrorCode converts an error in to an http-style error-code. +func ErrorCode(err error) string { + if err == nil { + return "200" + } + return "500" +} + +// TimeRequestHistogram runs 'f' and records how long it took in the given Prometheus +// histogram metric. If 'f' returns successfully, record a "200". Otherwise, record +// "500". It will also emit an OpenTracing span if you have a global tracer configured. +// +// If you want more complicated logic for translating errors into statuses, +// use 'TimeRequestStatus'. +func TimeRequestHistogram(ctx context.Context, method string, metric *prometheus.HistogramVec, f func(context.Context) error) error { + return TimeRequestHistogramStatus(ctx, method, metric, ErrorCode, f) +} + +// TimeRequestHistogramStatus runs 'f' and records how long it took in the given +// Prometheus histogram metric. It will also emit an OpenTracing span if you have +// a global tracer configured. +// +// toStatusCode is a function that translates errors returned by 'f' into +// HTTP-like status codes. 
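+//
+// A minimal usage sketch (the histogram must carry exactly two labels, assumed here to
+// be "method" and "status_code"; databaseCall is a hypothetical function being timed):
+//
+//    var dbDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{
+//        Name: "db_request_duration_seconds",
+//        Help: "Time spent talking to the database.",
+//    }, []string{"method", "status_code"})
+//
+//    err := TimeRequestHistogram(ctx, "Get", dbDuration, func(ctx context.Context) error {
+//        return databaseCall(ctx)
+//    })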
+func TimeRequestHistogramStatus(ctx context.Context, method string, metric *prometheus.HistogramVec, toStatusCode func(error) string, f func(context.Context) error) error { + if toStatusCode == nil { + toStatusCode = ErrorCode + } + + sp, newCtx := opentracing.StartSpanFromContext(ctx, method) + ext.SpanKindRPCClient.Set(sp) + startTime := time.Now() + + err := f(newCtx) + + if err != nil { + ext.Error.Set(sp, true) + } + sp.Finish() + metric.WithLabelValues(method, toStatusCode(err)).Observe(time.Now().Sub(startTime).Seconds()) + return err +} diff --git a/vendor/github.com/weaveworks/common/logging/event.go b/vendor/github.com/weaveworks/common/logging/event.go new file mode 100644 index 0000000000..ddffc73e25 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/event.go @@ -0,0 +1,135 @@ +package logging + +import ( + "fmt" + "net" + "net/http" + "strconv" + + log "github.com/Sirupsen/logrus" + "github.com/fluent/fluent-logger-golang/fluent" +) + +const maxBufferedEvents = 100 + +// Event is a user event to be sent to out analytics system +type Event struct { + ID string `msg:"event"` + SessionID string `msg:"session_id"` + Product string `msg:"product"` + Version string `msg:"version"` + UserAgent string `msg:"user_agent"` + ClientID string `msg:"client_id"` + OrganizationID string `msg:"org_id"` + UserID string `msg:"user_id"` + Values string `msg:"values"` +} + +// EventLogger logs events to the analytics system +type EventLogger struct { + stop chan struct{} + events chan Event + logger *fluent.Fluent +} + +// NewEventLogger creates a new EventLogger. +func NewEventLogger(fluentHostPort string) (*EventLogger, error) { + host, port, err := net.SplitHostPort(fluentHostPort) + if err != nil { + return nil, err + } + intPort, err := strconv.Atoi(port) + if err != nil { + return nil, err + } + logger, err := fluent.New(fluent.Config{ + FluentPort: intPort, + FluentHost: host, + AsyncConnect: true, + MaxRetry: -1, + }) + if err != nil { + return nil, err + } + + el := &EventLogger{ + stop: make(chan struct{}), + events: make(chan Event, maxBufferedEvents), + logger: logger, + } + go el.logLoop() + return el, nil +} + +func (el *EventLogger) post(e Event) { + if err := el.logger.Post("events", e); err != nil { + log.Warnf("EventLogger: failed to log event: %v", e) + } +} + +func (el *EventLogger) logLoop() { + for done := false; !done; { + select { + case event := <-el.events: + el.post(event) + case <-el.stop: + done = true + } + } + + // flush remaining events + for done := false; !done; { + select { + case event := <-el.events: + el.post(event) + default: + done = true + } + } + + el.logger.Close() +} + +// Close closes and deallocates the event logger +func (el *EventLogger) Close() error { + close(el.stop) + return nil +} + +// LogEvent logs an event to the analytics system +func (el *EventLogger) LogEvent(e Event) error { + select { + case <-el.stop: + return fmt.Errorf("Stopping, discarding event: %v", e) + default: + } + + select { + case el.events <- e: // Put event in the channel unless it is full + return nil + default: + // full + } + return fmt.Errorf("Reached event buffer limit (%d), discarding event: %v", maxBufferedEvents, e) +} + +// HTTPEventExtractor extracts an event from an http requests indicating whether it should be loggged +type HTTPEventExtractor func(*http.Request) (Event, bool) + +// HTTPEventLogger logs an events extracted from an http request +type HTTPEventLogger struct { + Extractor HTTPEventExtractor + Logger *EventLogger +} + +// Wrap 
implements middleware.Wrap() +func (el HTTPEventLogger) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if event, shouldLog := el.Extractor(r); shouldLog { + if err := el.Logger.LogEvent(event); err != nil { + log.Warnf("HTTPEventLogger: failed to log event: %v", err) + } + } + next.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/weaveworks/common/logging/logging.go b/vendor/github.com/weaveworks/common/logging/logging.go new file mode 100644 index 0000000000..296bee12f4 --- /dev/null +++ b/vendor/github.com/weaveworks/common/logging/logging.go @@ -0,0 +1,45 @@ +package logging + +import ( + "bytes" + "fmt" + "os" + "strings" + + log "github.com/Sirupsen/logrus" +) + +// Setup configures logging output to stderr, sets the log level and sets the formatter. +func Setup(logLevel string) error { + log.SetOutput(os.Stderr) + level, err := log.ParseLevel(logLevel) + if err != nil { + return fmt.Errorf("Error parsing log level: %v", err) + } + log.SetLevel(level) + log.SetFormatter(&textFormatter{}) + return nil +} + +type textFormatter struct{} + +// Based off logrus.TextFormatter, which behaves completely +// differently when you don't want colored output +func (f *textFormatter) Format(entry *log.Entry) ([]byte, error) { + b := &bytes.Buffer{} + + levelText := strings.ToUpper(entry.Level.String())[0:4] + timeStamp := entry.Time.Format("2006/01/02 15:04:05.000000") + if len(entry.Data) > 0 { + fmt.Fprintf(b, "%s: %s %-44s ", levelText, timeStamp, entry.Message) + for k, v := range entry.Data { + fmt.Fprintf(b, " %s=%v", k, v) + } + } else { + // No padding when there's no fields + fmt.Fprintf(b, "%s: %s %s", levelText, timeStamp, entry.Message) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} diff --git a/vendor/github.com/weaveworks/common/middleware/errorhandler.go b/vendor/github.com/weaveworks/common/middleware/errorhandler.go new file mode 100644 index 0000000000..0f7ab6141f --- /dev/null +++ b/vendor/github.com/weaveworks/common/middleware/errorhandler.go @@ -0,0 +1,94 @@ +package middleware + +import ( + "bufio" + "fmt" + "net" + "net/http" +) + +func copyHeaders(src, dest http.Header) { + for k, v := range src { + dest[k] = v + } +} + +// ErrorHandler lets you call an alternate http handler upon a certain response code. +// Note it will assume a 200 if the wrapped handler does not write anything +type ErrorHandler struct { + Code int + Handler http.Handler +} + +// Wrap implements Middleware +func (e ErrorHandler) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + i := newErrorInterceptor(w, e.Code) + next.ServeHTTP(i, r) + if !i.gotCode { + i.WriteHeader(http.StatusOK) + } + if i.intercepted { + e.Handler.ServeHTTP(w, r) + } + }) +} + +// errorInterceptor wraps an underlying ResponseWriter and buffers all header changes, until it knows the return code. +// It then passes everything through, unless the code matches the target code, in which case it will discard everything. 
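+//
+// A minimal sketch of using the ErrorHandler defined above (notFoundPage and apiHandler
+// are hypothetical handlers; notFoundPage renders a custom response whenever the wrapped
+// handler returns 404):
+//
+//    eh := ErrorHandler{Code: http.StatusNotFound, Handler: notFoundPage}
+//    http.ListenAndServe(":8080", eh.Wrap(apiHandler))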
+type errorInterceptor struct { + originalWriter http.ResponseWriter + targetCode int + headers http.Header + gotCode bool + intercepted bool +} + +func newErrorInterceptor(w http.ResponseWriter, code int) *errorInterceptor { + i := errorInterceptor{originalWriter: w, targetCode: code} + i.headers = make(http.Header) + copyHeaders(w.Header(), i.headers) + return &i +} + +// Header implements http.ResponseWriter +func (i *errorInterceptor) Header() http.Header { + return i.headers +} + +// WriteHeader implements http.ResponseWriter +func (i *errorInterceptor) WriteHeader(code int) { + if i.gotCode { + panic("errorInterceptor.WriteHeader() called twice") + } + + i.gotCode = true + if code == i.targetCode { + i.intercepted = true + } else { + copyHeaders(i.headers, i.originalWriter.Header()) + i.originalWriter.WriteHeader(code) + } +} + +// Write implements http.ResponseWriter +func (i *errorInterceptor) Write(data []byte) (int, error) { + if !i.gotCode { + i.WriteHeader(http.StatusOK) + } + if !i.intercepted { + return i.originalWriter.Write(data) + } + return len(data), nil +} + +// errorInterceptor also implements net.Hijacker, to let the downstream Handler +// hijack the connection. This is needed, for example, for working with websockets. +func (i *errorInterceptor) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj, ok := i.originalWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("error interceptor: can't cast original ResponseWriter to Hijacker") + } + i.gotCode = true + return hj.Hijack() +} diff --git a/vendor/github.com/weaveworks/common/middleware/instrument.go b/vendor/github.com/weaveworks/common/middleware/instrument.go new file mode 100644 index 0000000000..676a59f6ab --- /dev/null +++ b/vendor/github.com/weaveworks/common/middleware/instrument.go @@ -0,0 +1,84 @@ +package middleware + +import ( + "net/http" + "regexp" + "strconv" + "strings" + "time" + + "github.com/gorilla/mux" + "github.com/prometheus/client_golang/prometheus" +) + +// Instrument is a Middleware which records timings for every HTTP request +type Instrument struct { + RouteMatcher interface { + Match(*http.Request, *mux.RouteMatch) bool + } + Duration *prometheus.HistogramVec +} + +func isWSHandshakeRequest(req *http.Request) bool { + return strings.ToLower(req.Header.Get("Upgrade")) == "websocket" && + strings.ToLower(req.Header.Get("Connection")) == "upgrade" +} + +// Wrap implements middleware.Interface +func (i Instrument) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + begin := time.Now() + isWS := strconv.FormatBool(isWSHandshakeRequest(r)) + interceptor := &interceptor{ResponseWriter: w, statusCode: http.StatusOK} + route := i.getRouteName(r) + next.ServeHTTP(interceptor, r) + var ( + status = strconv.Itoa(interceptor.statusCode) + took = time.Since(begin) + ) + i.Duration.WithLabelValues(r.Method, route, status, isWS).Observe(took.Seconds()) + }) +} + +// Return a name identifier for ths request. There are three options: +// 1. The request matches a gorilla mux route, with a name. Use that. +// 2. The request matches an unamed gorilla mux router. Munge the path +// template such that templates like '/api/{org}/foo' come out as +// 'api_org_foo'. +// 3. The request doesn't match a mux route. Munge the Path in the same +// manner as (2). +// We do all this as we do not wish to emit high cardinality labels to +// prometheus. 
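+//
+// For example, MakeLabelValue maps the template "/api/{org}/foo" to "api_org_foo" and
+// "/" to "root". A sketch of wiring the middleware up (requestDuration is assumed to be
+// a HistogramVec with four labels for method, route, status code and websocket flag;
+// fooHandler is hypothetical):
+//
+//    router := mux.NewRouter()
+//    router.Path("/api/{org}/foo").Name("api_org_foo").Handler(fooHandler)
+//    handler := Instrument{RouteMatcher: router, Duration: requestDuration}.Wrap(router)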
+func (i Instrument) getRouteName(r *http.Request) string { + var routeMatch mux.RouteMatch + if i.RouteMatcher != nil && i.RouteMatcher.Match(r, &routeMatch) { + if name := routeMatch.Route.GetName(); name != "" { + return name + } + if tmpl, err := routeMatch.Route.GetPathTemplate(); err != nil { + return MakeLabelValue(tmpl) + } + } + return MakeLabelValue(r.URL.Path) +} + +var invalidChars = regexp.MustCompile(`[^a-zA-Z0-9]+`) + +// MakeLabelValue converts a Gorilla mux path to a string suitable for use in +// a Prometheus label value. +func MakeLabelValue(path string) string { + // Convert non-alnums to underscores. + result := invalidChars.ReplaceAllString(path, "_") + + // Trim leading and trailing underscores. + result = strings.Trim(result, "_") + + // Make it all lowercase + result = strings.ToLower(result) + + // Special case. + if result == "" { + result = "root" + } + return result +} diff --git a/vendor/github.com/weaveworks/common/middleware/logging.go b/vendor/github.com/weaveworks/common/middleware/logging.go new file mode 100644 index 0000000000..e05a80ce14 --- /dev/null +++ b/vendor/github.com/weaveworks/common/middleware/logging.go @@ -0,0 +1,69 @@ +package middleware + +import ( + "bufio" + "fmt" + "net" + "net/http" + "time" + + log "github.com/Sirupsen/logrus" +) + +// Log middleware logs http requests +type Log struct { + LogSuccess bool // LogSuccess true -> log successful queries; false -> only log failed queries +} + +// Wrap implements Middleware +func (l Log) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + begin := time.Now() + uri := r.RequestURI // capture the URI before running next, as it may get rewritten + i := &interceptor{ResponseWriter: w, statusCode: http.StatusOK} + next.ServeHTTP(i, r) + if l.LogSuccess || !(100 <= i.statusCode && i.statusCode < 400) { + log.Infof("%s %s (%d) %s", r.Method, uri, i.statusCode, time.Since(begin)) + } + }) +} + +// Logging middleware logs each HTTP request method, path, response code and +// duration for all HTTP requests. +var Logging = Log{ + LogSuccess: true, +} + +// LogFailed middleware logs each HTTP request method, path, response code and +// duration for non-2xx HTTP requests. +var LogFailed = Log{ + LogSuccess: false, +} + +// interceptor implements WriteHeader to intercept status codes. WriteHeader +// may not be called on success, so initialize statusCode with the status you +// want to report on success, i.e. http.StatusOK. +// +// interceptor also implements net.Hijacker, to let the downstream Handler +// hijack the connection. This is needed, for example, for working with websockets. 
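+//
+// A minimal sketch of putting the Log middleware above in front of a handler (apiHandler
+// is hypothetical):
+//
+//    http.ListenAndServe(":8080", Logging.Wrap(apiHandler))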
+type interceptor struct { + http.ResponseWriter + statusCode int + recorded bool +} + +func (i *interceptor) WriteHeader(code int) { + if !i.recorded { + i.statusCode = code + i.recorded = true + } + i.ResponseWriter.WriteHeader(code) +} + +func (i *interceptor) Hijack() (net.Conn, *bufio.ReadWriter, error) { + hj, ok := i.ResponseWriter.(http.Hijacker) + if !ok { + return nil, nil, fmt.Errorf("interceptor: can't cast parent ResponseWriter to Hijacker") + } + return hj.Hijack() +} diff --git a/vendor/github.com/weaveworks/common/middleware/middleware.go b/vendor/github.com/weaveworks/common/middleware/middleware.go new file mode 100644 index 0000000000..ad8925ac86 --- /dev/null +++ b/vendor/github.com/weaveworks/common/middleware/middleware.go @@ -0,0 +1,33 @@ +package middleware + +import ( + "net/http" +) + +// Interface is the shared contract for all middlesware, and allows middlesware +// to wrap handlers. +type Interface interface { + Wrap(http.Handler) http.Handler +} + +// Func is to Interface as http.HandlerFunc is to http.Handler +type Func func(http.Handler) http.Handler + +// Wrap implements Interface +func (m Func) Wrap(next http.Handler) http.Handler { + return m(next) +} + +// Identity is an Interface which doesn't do anything. +var Identity Interface = Func(func(h http.Handler) http.Handler { return h }) + +// Merge produces a middleware that applies multiple middlesware in turn; +// ie Merge(f,g,h).Wrap(handler) == f.Wrap(g.Wrap(h.Wrap(handler))) +func Merge(middlesware ...Interface) Interface { + return Func(func(next http.Handler) http.Handler { + for i := len(middlesware) - 1; i >= 0; i-- { + next = middlesware[i].Wrap(next) + } + return next + }) +} diff --git a/vendor/github.com/weaveworks/common/middleware/path_rewrite.go b/vendor/github.com/weaveworks/common/middleware/path_rewrite.go new file mode 100644 index 0000000000..65eb2e9f24 --- /dev/null +++ b/vendor/github.com/weaveworks/common/middleware/path_rewrite.go @@ -0,0 +1,42 @@ +package middleware + +import ( + "net/http" + "regexp" +) + +// PathRewrite supports regex matching and replace on Request URIs +func PathRewrite(regexp *regexp.Regexp, replacement string) Interface { + return pathRewrite{ + regexp: regexp, + replacement: replacement, + } +} + +type pathRewrite struct { + regexp *regexp.Regexp + replacement string +} + +func (p pathRewrite) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.RequestURI = p.regexp.ReplaceAllString(r.RequestURI, p.replacement) + r.URL.Path = p.regexp.ReplaceAllString(r.URL.Path, p.replacement) + next.ServeHTTP(w, r) + }) +} + +// PathReplace replcase Request.RequestURI with the specified string. +func PathReplace(replacement string) Interface { + return pathReplace(replacement) +} + +type pathReplace string + +func (p pathReplace) Wrap(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.URL.Path = string(p) + r.RequestURI = string(p) + next.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/weaveworks/common/mtime/mtime.go b/vendor/github.com/weaveworks/common/mtime/mtime.go new file mode 100644 index 0000000000..dd1fc87ca0 --- /dev/null +++ b/vendor/github.com/weaveworks/common/mtime/mtime.go @@ -0,0 +1,16 @@ +package mtime + +import "time" + +// Now returns the current time. +var Now = func() time.Time { return time.Now() } + +// NowForce sets the time returned by Now to t. 
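+//
+// A minimal sketch of freezing time in a test (the instant chosen is arbitrary):
+//
+//    mtime.NowForce(time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC))
+//    defer mtime.NowReset()
+//    t0 := mtime.Now() // always returns the frozen instant above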
+func NowForce(t time.Time) { + Now = func() time.Time { return t } +} + +// NowReset makes Now returns the current time again. +func NowReset() { + Now = func() time.Time { return time.Now() } +} diff --git a/vendor/github.com/weaveworks/common/network/interface.go b/vendor/github.com/weaveworks/common/network/interface.go new file mode 100644 index 0000000000..9a94b9ac6b --- /dev/null +++ b/vendor/github.com/weaveworks/common/network/interface.go @@ -0,0 +1,33 @@ +package network + +import ( + "fmt" + "net" +) + +// GetFirstAddressOf returns the first IPv4 address of the supplied interface name. +func GetFirstAddressOf(name string) (string, error) { + inf, err := net.InterfaceByName(name) + if err != nil { + return "", err + } + + addrs, err := inf.Addrs() + if err != nil { + return "", err + } + if len(addrs) <= 0 { + return "", fmt.Errorf("No address found for %s", name) + } + + for _, addr := range addrs { + switch v := addr.(type) { + case *net.IPNet: + if ip := v.IP.To4(); ip != nil { + return v.IP.String(), nil + } + } + } + + return "", fmt.Errorf("No address found for %s", name) +} diff --git a/vendor/github.com/weaveworks/common/sanitize/sanitize.go b/vendor/github.com/weaveworks/common/sanitize/sanitize.go new file mode 100644 index 0000000000..0ba4eae074 --- /dev/null +++ b/vendor/github.com/weaveworks/common/sanitize/sanitize.go @@ -0,0 +1,44 @@ +package sanitize + +import ( + "fmt" + "net" + "net/url" + "strings" + + log "github.com/Sirupsen/logrus" +) + +// URL returns a function that sanitizes a URL string. It lets underspecified +// strings to be converted to usable URLs via some default arguments. +func URL(defaultScheme string, defaultPort int, defaultPath string) func(string) string { + if defaultScheme == "" { + defaultScheme = "http://" + } + return func(s string) string { + if s == "" { + return s // can't do much here + } + if !strings.Contains(s, "://") { + s = defaultScheme + s + } + u, err := url.Parse(s) + if err != nil { + log.Errorf("%q: %v", s, err) + return s // oh well + } + if _, port, err := net.SplitHostPort(u.Host); err != nil && defaultPort > 0 { + u.Host += fmt.Sprintf(":%d", defaultPort) + } else if port == "443" { + if u.Scheme == "ws" { + u.Scheme = "wss" + } else { + u.Scheme = "https" + } + } + if defaultPath != "" && u.Path != defaultPath { + u.Path = defaultPath + } + return u.String() + } +} diff --git a/vendor/github.com/weaveworks/common/test/diff.go b/vendor/github.com/weaveworks/common/test/diff.go new file mode 100644 index 0000000000..477ae98f96 --- /dev/null +++ b/vendor/github.com/weaveworks/common/test/diff.go @@ -0,0 +1,22 @@ +package test + +import ( + "github.com/davecgh/go-spew/spew" + "github.com/pmezard/go-difflib/difflib" +) + +// Diff diffs two arbitrary data structures, giving human-readable output. 
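+//
+// A minimal sketch of using it in a test (want and have are whatever values the test
+// compares):
+//
+//    if !reflect.DeepEqual(want, have) {
+//        t.Errorf("unexpected result:%s", test.Diff(want, have))
+//    }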
+func Diff(want, have interface{}) string { + config := spew.NewDefaultConfig() + config.ContinueOnMethod = true + config.SortKeys = true + config.SpewKeys = true + text, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(config.Sdump(want)), + B: difflib.SplitLines(config.Sdump(have)), + FromFile: "want", + ToFile: "have", + Context: 3, + }) + return "\n" + text +} diff --git a/vendor/github.com/weaveworks/common/test/exec/exec.go b/vendor/github.com/weaveworks/common/test/exec/exec.go new file mode 100644 index 0000000000..6e6d9cc899 --- /dev/null +++ b/vendor/github.com/weaveworks/common/test/exec/exec.go @@ -0,0 +1,79 @@ +package exec + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/weaveworks/common/exec" +) + +type mockCmd struct { + io.ReadCloser + quit chan struct{} +} + +type blockingReader struct { + quit chan struct{} +} + +// NewMockCmdString creates a new mock Cmd which has s on its stdout pipe +func NewMockCmdString(s string) exec.Cmd { + return &mockCmd{ + ReadCloser: struct { + io.Reader + io.Closer + }{ + bytes.NewBufferString(s), + ioutil.NopCloser(nil), + }, + quit: make(chan struct{}), + } +} + +// NewMockCmd creates a new mock Cmd with rc as its stdout pipe +func NewMockCmd(rc io.ReadCloser) exec.Cmd { + return &mockCmd{ + ReadCloser: rc, + quit: make(chan struct{}), + } +} + +func (c *mockCmd) Start() error { + return nil +} + +func (c *mockCmd) Wait() error { + return nil +} + +func (c *mockCmd) StdoutPipe() (io.ReadCloser, error) { + return c.ReadCloser, nil +} + +func (c *mockCmd) StderrPipe() (io.ReadCloser, error) { + return &blockingReader{c.quit}, nil +} + +func (c *mockCmd) Kill() error { + close(c.quit) + return nil +} + +func (c *mockCmd) Output() ([]byte, error) { + return ioutil.ReadAll(c.ReadCloser) +} + +func (c *mockCmd) Run() error { + return nil +} + +func (b *blockingReader) Read(p []byte) (n int, err error) { + <-b.quit + return 0, nil +} + +func (b *blockingReader) Close() error { + <-b.quit + return nil +} diff --git a/vendor/github.com/weaveworks/common/test/fs/fs.go b/vendor/github.com/weaveworks/common/test/fs/fs.go new file mode 100644 index 0000000000..b03565223c --- /dev/null +++ b/vendor/github.com/weaveworks/common/test/fs/fs.go @@ -0,0 +1,287 @@ +package fs + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "strings" + "syscall" + "time" + + "github.com/weaveworks/common/fs" +) + +type mockInode struct{} + +type dir struct { + mockInode + name string + entries map[string]Entry + stat syscall.Stat_t +} + +// File is a mock file +type File struct { + mockInode + FName string + FContents string + FReader io.Reader + FWriter io.Writer + FCloser io.Closer + FStat syscall.Stat_t +} + +// Entry is an entry in the mock filesystem +type Entry interface { + os.FileInfo + fs.Interface + Add(path string, e Entry) error + Remove(path string) error +} + +// Dir creates a new directory with the given entries. 
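+//
+// A minimal sketch of building a mock tree and installing it over the real filesystem
+// (assuming this package is imported as mockfs and the parent fs package as fs; the
+// paths and contents are illustrative):
+//
+//    root := mockfs.Dir("",
+//        mockfs.Dir("proc",
+//            mockfs.File{FName: "version", FContents: "Linux version 4.4.0 (mock)"},
+//        ),
+//    )
+//    fs.Mock(root)
+//    defer fs.Restore()
+//    data, _ := fs.ReadFile("/proc/version") // reads from the mock tree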
+func Dir(name string, entries ...Entry) Entry { + result := dir{ + name: name, + entries: map[string]Entry{}, + } + + for _, entry := range entries { + result.entries[entry.Name()] = entry + } + + return result +} + +func split(path string) (string, string) { + if !strings.HasPrefix(path, "/") { + panic(path) + } + + comps := strings.SplitN(path, "/", 3) + if len(comps) == 2 { + return comps[1], "/" + } + + return comps[1], "/" + comps[2] +} + +func (mockInode) Size() int64 { return 0 } +func (mockInode) Mode() os.FileMode { return 0 } +func (mockInode) ModTime() time.Time { return time.Now() } +func (mockInode) Sys() interface{} { return nil } + +func (p dir) Name() string { return p.name } +func (p dir) IsDir() bool { return true } + +func (p dir) ReadDir(path string) ([]os.FileInfo, error) { + if path == "/" { + result := []os.FileInfo{} + for _, v := range p.entries { + result = append(result, v) + } + return result, nil + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return nil, fmt.Errorf("Not found: %s", path) + } + + return fs.ReadDir(tail) +} + +func (p dir) ReadDirNames(path string) ([]string, error) { + if path == "/" { + result := []string{} + for _, v := range p.entries { + result = append(result, v.Name()) + } + return result, nil + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return nil, fmt.Errorf("Not found: %s", path) + } + + return fs.ReadDirNames(tail) +} + +func (p dir) ReadFile(path string) ([]byte, error) { + if path == "/" { + return nil, fmt.Errorf("I'm a directory") + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return nil, fmt.Errorf("Not found: %s", path) + } + + return fs.ReadFile(tail) +} + +func (p dir) Lstat(path string, stat *syscall.Stat_t) error { + if path == "/" { + *stat = syscall.Stat_t{Mode: syscall.S_IFDIR} + return nil + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return fmt.Errorf("Not found: %s", path) + } + + return fs.Lstat(tail, stat) +} + +func (p dir) Stat(path string, stat *syscall.Stat_t) error { + if path == "/" { + *stat = syscall.Stat_t{Mode: syscall.S_IFDIR} + return nil + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return fmt.Errorf("Not found: %s", path) + } + + return fs.Stat(tail, stat) +} + +func (p dir) Open(path string) (io.ReadWriteCloser, error) { + if path == "/" { + return nil, fmt.Errorf("I'm a directory") + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return nil, fmt.Errorf("Not found: %s", path) + } + + return fs.Open(tail) +} + +func (p dir) Add(path string, e Entry) error { + if path == "/" { + p.entries[e.Name()] = e + return nil + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + fs = Dir(head) + p.entries[head] = fs + } + + return fs.Add(tail, e) +} + +func (p dir) Remove(path string) error { + if _, ok := p.entries[strings.TrimPrefix(path, "/")]; ok { + delete(p.entries, strings.TrimPrefix(path, "/")) + return nil + } + + head, tail := split(path) + fs, ok := p.entries[head] + if !ok { + return nil + } + return fs.Remove(tail) +} + +// Name implements os.FileInfo +func (p File) Name() string { return p.FName } + +// IsDir implements os.FileInfo +func (p File) IsDir() bool { return false } + +// ReadDir implements FS +func (p File) ReadDir(path string) ([]os.FileInfo, error) { + return nil, fmt.Errorf("I'm a file") +} + +// ReadDirNames implements FS +func (p File) ReadDirNames(path string) ([]string, error) { + return nil, 
fmt.Errorf("I'm a file") +} + +// ReadFile implements FS +func (p File) ReadFile(path string) ([]byte, error) { + if path != "/" { + return nil, fmt.Errorf("I'm a file") + } + if p.FReader != nil { + return ioutil.ReadAll(p.FReader) + } + return []byte(p.FContents), nil +} + +// Lstat implements FS +func (p File) Lstat(path string, stat *syscall.Stat_t) error { + if path != "/" { + return fmt.Errorf("I'm a file") + } + *stat = p.FStat + return nil +} + +// Stat implements FS +func (p File) Stat(path string, stat *syscall.Stat_t) error { + if path != "/" { + return fmt.Errorf("I'm a file") + } + *stat = p.FStat + return nil +} + +// Open implements FS +func (p File) Open(path string) (io.ReadWriteCloser, error) { + if path != "/" { + return nil, fmt.Errorf("I'm a file") + } + buf := bytes.NewBuffer([]byte(p.FContents)) + s := struct { + io.Reader + io.Writer + io.Closer + }{ + buf, buf, ioutil.NopCloser(nil), + } + if p.FReader != nil { + s.Reader = p.FReader + } + if p.FWriter != nil { + s.Writer = p.FWriter + } + if p.FCloser != nil { + s.Closer = p.FCloser + } + return s, nil +} + +// Add adds a new node to the fs +func (p File) Add(path string, e Entry) error { + if path != "/" { + return fmt.Errorf("I'm a file") + } + return nil +} + +// Remove removes a node from the fs +func (p File) Remove(path string) error { + if path != "/" { + return fmt.Errorf("I'm a file") + } + return nil +} diff --git a/vendor/github.com/weaveworks/common/tools/cmd/wcloud/cli.go b/vendor/github.com/weaveworks/common/tools/cmd/wcloud/cli.go new file mode 100644 index 0000000000..ba3c355fd1 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/cmd/wcloud/cli.go @@ -0,0 +1,238 @@ +package main + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io/ioutil" + "os" + "os/user" + "path/filepath" + "strings" + "time" + + "github.com/olekukonko/tablewriter" + "gopkg.in/yaml.v2" +) + +// ArrayFlags allows you to collect repeated flags +type ArrayFlags []string + +func (a *ArrayFlags) String() string { + return strings.Join(*a, ",") +} + +// Set implements flags.Value +func (a *ArrayFlags) Set(value string) error { + *a = append(*a, value) + return nil +} + +func env(key, def string) string { + if val, ok := os.LookupEnv(key); ok { + return val + } + return def +} + +var ( + token = env("SERVICE_TOKEN", "") + baseURL = env("BASE_URL", "https://cloud.weave.works") +) + +func usage() { + fmt.Println(`Usage: + deploy : Deploy image to your configured env + list List recent deployments + config () Get (or set) the configured env + logs Show lots for the given deployment`) +} + +func main() { + if len(os.Args) <= 1 { + usage() + os.Exit(1) + } + + c := NewClient(token, baseURL) + + switch os.Args[1] { + case "deploy": + deploy(c, os.Args[2:]) + case "list": + list(c, os.Args[2:]) + case "config": + config(c, os.Args[2:]) + case "logs": + logs(c, os.Args[2:]) + case "events": + events(c, os.Args[2:]) + case "help": + usage() + default: + usage() + } +} + +func deploy(c Client, args []string) { + var ( + flags = flag.NewFlagSet("", flag.ContinueOnError) + username = flags.String("u", "", "Username to report to deploy service (default with be current user)") + services ArrayFlags + ) + flags.Var(&services, "service", "Service to update (can be repeated)") + if err := flags.Parse(args); err != nil { + usage() + return + } + args = flags.Args() + if len(args) != 1 { + usage() + return + } + parts := strings.SplitN(args[0], ":", 2) + if len(parts) < 2 { + usage() + return + } + if *username == "" { + user, err := 
user.Current() + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + *username = user.Username + } + deployment := Deployment{ + ImageName: parts[0], + Version: parts[1], + TriggeringUser: *username, + IntendedServices: services, + } + if err := c.Deploy(deployment); err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } +} + +func list(c Client, args []string) { + var ( + flags = flag.NewFlagSet("", flag.ContinueOnError) + since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results") + ) + if err := flags.Parse(args); err != nil { + usage() + return + } + through := time.Now() + from := through.Add(-*since) + deployments, err := c.GetDeployments(from.Unix(), through.Unix()) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + table := tablewriter.NewWriter(os.Stdout) + table.SetHeader([]string{"Created", "ID", "Image", "Version", "State"}) + table.SetBorder(false) + table.SetColumnSeparator(" ") + for _, deployment := range deployments { + table.Append([]string{ + deployment.CreatedAt.Format(time.RFC822), + deployment.ID, + deployment.ImageName, + deployment.Version, + deployment.State, + }) + } + table.Render() +} + +func events(c Client, args []string) { + var ( + flags = flag.NewFlagSet("", flag.ContinueOnError) + since = flags.Duration("since", 7*24*time.Hour, "How far back to fetch results") + ) + if err := flags.Parse(args); err != nil { + usage() + return + } + through := time.Now() + from := through.Add(-*since) + events, err := c.GetEvents(from.Unix(), through.Unix()) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + fmt.Println("events: ", string(events)) +} + +func loadConfig(filename string) (*Config, error) { + extension := filepath.Ext(filename) + var config Config + buf, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + if extension == ".yaml" || extension == ".yml" { + if err := yaml.Unmarshal(buf, &config); err != nil { + return nil, err + } + } else { + if err := json.NewDecoder(bytes.NewReader(buf)).Decode(&config); err != nil { + return nil, err + } + } + return &config, nil +} + +func config(c Client, args []string) { + if len(args) > 1 { + usage() + return + } + + if len(args) == 1 { + config, err := loadConfig(args[0]) + if err != nil { + fmt.Println("Error reading config:", err) + os.Exit(1) + } + + if err := c.SetConfig(config); err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + } else { + config, err := c.GetConfig() + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + buf, err := yaml.Marshal(config) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + fmt.Println(string(buf)) + } +} + +func logs(c Client, args []string) { + if len(args) != 1 { + usage() + return + } + + output, err := c.GetLogs(args[0]) + if err != nil { + fmt.Println(err.Error()) + os.Exit(1) + } + + fmt.Println(string(output)) +} diff --git a/vendor/github.com/weaveworks/common/tools/cmd/wcloud/client.go b/vendor/github.com/weaveworks/common/tools/cmd/wcloud/client.go new file mode 100644 index 0000000000..02cbbaa747 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/cmd/wcloud/client.go @@ -0,0 +1,150 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" +) + +// Client for the deployment service +type Client struct { + token string + baseURL string +} + +// NewClient makes a new Client +func NewClient(token, baseURL string) Client { + return Client{ + token: token, + baseURL: baseURL, + } +} + +func (c Client) 
newRequest(method, path string, body io.Reader) (*http.Request, error) { + req, err := http.NewRequest(method, c.baseURL+path, body) + if err != nil { + return nil, err + } + req.Header.Add("Authorization", fmt.Sprintf("Scope-Probe token=%s", c.token)) + return req, nil +} + +// Deploy notifies the deployment service about a new deployment +func (c Client) Deploy(deployment Deployment) error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(deployment); err != nil { + return err + } + req, err := c.newRequest("POST", "/api/deploy/deploy", &buf) + if err != nil { + return err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if res.StatusCode != 204 { + return fmt.Errorf("error making request: %s", res.Status) + } + return nil +} + +// GetDeployments returns a list of deployments +func (c Client) GetDeployments(from, through int64) ([]Deployment, error) { + req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy?from=%d&through=%d", from, through), nil) + if err != nil { + return nil, err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + return nil, fmt.Errorf("error making request: %s", res.Status) + } + var response struct { + Deployments []Deployment `json:"deployments"` + } + if err := json.NewDecoder(res.Body).Decode(&response); err != nil { + return nil, err + } + return response.Deployments, nil +} + +// GetEvents returns the raw events. +func (c Client) GetEvents(from, through int64) ([]byte, error) { + req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/event?from=%d&through=%d", from, through), nil) + if err != nil { + return nil, err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + return nil, fmt.Errorf("error making request: %s", res.Status) + } + return ioutil.ReadAll(res.Body) +} + +// GetConfig returns the current Config +func (c Client) GetConfig() (*Config, error) { + req, err := c.newRequest("GET", "/api/config/deploy", nil) + if err != nil { + return nil, err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode == 404 { + return nil, fmt.Errorf("no configuration uploaded yet") + } + if res.StatusCode != 200 { + return nil, fmt.Errorf("error making request: %s", res.Status) + } + var config Config + if err := json.NewDecoder(res.Body).Decode(&config); err != nil { + return nil, err + } + return &config, nil +} + +// SetConfig sets the current Config +func (c Client) SetConfig(config *Config) error { + var buf bytes.Buffer + if err := json.NewEncoder(&buf).Encode(config); err != nil { + return err + } + req, err := c.newRequest("POST", "/api/config/deploy", &buf) + if err != nil { + return err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return err + } + if res.StatusCode != 204 { + return fmt.Errorf("error making request: %s", res.Status) + } + return nil +} + +// GetLogs returns the logs for a given deployment. 
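+//
+// A minimal sketch of the call sequence the CLI drives (token and baseURL come from the
+// SERVICE_TOKEN and BASE_URL environment variables; the deployment ID is illustrative):
+//
+//    c := NewClient(token, baseURL)
+//    logs, err := c.GetLogs("3aa95a5e")
+//    if err == nil {
+//        fmt.Println(string(logs))
+//    }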
+func (c Client) GetLogs(deployID string) ([]byte, error) { + req, err := c.newRequest("GET", fmt.Sprintf("/api/deploy/deploy/%s/log", deployID), nil) + if err != nil { + return nil, err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + if res.StatusCode != 200 { + return nil, fmt.Errorf("error making request: %s", res.Status) + } + return ioutil.ReadAll(res.Body) +} diff --git a/vendor/github.com/weaveworks/common/tools/cmd/wcloud/types.go b/vendor/github.com/weaveworks/common/tools/cmd/wcloud/types.go new file mode 100644 index 0000000000..a068163c35 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/cmd/wcloud/types.go @@ -0,0 +1,43 @@ +package main + +import ( + "time" +) + +// Deployment describes a deployment +type Deployment struct { + ID string `json:"id"` + CreatedAt time.Time `json:"created_at"` + ImageName string `json:"image_name"` + Version string `json:"version"` + Priority int `json:"priority"` + State string `json:"status"` + + TriggeringUser string `json:"triggering_user"` + IntendedServices []string `json:"intended_services"` +} + +// Config for the deployment system for a user. +type Config struct { + RepoURL string `json:"repo_url" yaml:"repo_url"` + RepoBranch string `json:"repo_branch" yaml:"repo_branch"` + RepoPath string `json:"repo_path" yaml:"repo_path"` + RepoKey string `json:"repo_key" yaml:"repo_key"` + KubeconfigPath string `json:"kubeconfig_path" yaml:"kubeconfig_path"` + AutoApply bool `json:"auto_apply" yaml:"auto_apply"` + + Notifications []NotificationConfig `json:"notifications" yaml:"notifications"` + + // Globs of files not to change, relative to the route of the repo + ConfigFileBlackList []string `json:"config_file_black_list" yaml:"config_file_black_list"` + + CommitMessageTemplate string `json:"commit_message_template" yaml:"commit_message_template"` // See https://golang.org/pkg/text/template/ +} + +// NotificationConfig describes how to send notifications +type NotificationConfig struct { + SlackWebhookURL string `json:"slack_webhook_url" yaml:"slack_webhook_url"` + SlackUsername string `json:"slack_username" yaml:"slack_username"` + MessageTemplate string `json:"message_template" yaml:"message_template"` + ApplyMessageTemplate string `json:"apply_message_template" yaml:"apply_message_template"` +} diff --git a/vendor/github.com/weaveworks/common/tools/cover/cover.go b/vendor/github.com/weaveworks/common/tools/cover/cover.go new file mode 100644 index 0000000000..4c5fcfd7d6 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/cover/cover.go @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + "os" + "sort" + + "golang.org/x/tools/cover" +) + +func merge(p1, p2 *cover.Profile) *cover.Profile { + output := cover.Profile{ + FileName: p1.FileName, + Mode: p1.Mode, + } + + i, j := 0, 0 + for i < len(p1.Blocks) && j < len(p2.Blocks) { + bi, bj := p1.Blocks[i], p2.Blocks[j] + if bi.StartLine == bj.StartLine && bi.StartCol == bj.StartCol { + + if bi.EndLine != bj.EndLine || + bi.EndCol != bj.EndCol || + bi.NumStmt != bj.NumStmt { + panic("Not run on same source!") + } + + output.Blocks = append(output.Blocks, cover.ProfileBlock{ + StartLine: bi.StartLine, + StartCol: bi.StartCol, + EndLine: bi.EndLine, + EndCol: bi.EndCol, + NumStmt: bi.NumStmt, + Count: bi.Count + bj.Count, + }) + i++ + j++ + } else if bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol { + output.Blocks = append(output.Blocks, bi) + i++ + } else { + output.Blocks = append(output.Blocks, bj) + 
j++ + } + } + + for ; i < len(p1.Blocks); i++ { + output.Blocks = append(output.Blocks, p1.Blocks[i]) + } + + for ; j < len(p2.Blocks); j++ { + output.Blocks = append(output.Blocks, p2.Blocks[j]) + } + + return &output +} + +func print(profiles []*cover.Profile) { + fmt.Println("mode: atomic") + for _, profile := range profiles { + for _, block := range profile.Blocks { + fmt.Printf("%s:%d.%d,%d.%d %d %d\n", profile.FileName, block.StartLine, block.StartCol, + block.EndLine, block.EndCol, block.NumStmt, block.Count) + } + } +} + +// Copied from https://github.com/golang/tools/blob/master/cover/profile.go +type byFileName []*cover.Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +func main() { + outputProfiles := map[string]*cover.Profile{} + for _, input := range os.Args[1:] { + inputProfiles, err := cover.ParseProfiles(input) + if err != nil { + panic(fmt.Sprintf("Error parsing %s: %v", input, err)) + } + for _, ip := range inputProfiles { + op := outputProfiles[ip.FileName] + if op == nil { + outputProfiles[ip.FileName] = ip + } else { + outputProfiles[ip.FileName] = merge(op, ip) + } + } + } + profiles := make([]*cover.Profile, 0, len(outputProfiles)) + for _, profile := range outputProfiles { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + print(profiles) +} diff --git a/vendor/github.com/weaveworks/common/tools/runner/runner.go b/vendor/github.com/weaveworks/common/tools/runner/runner.go new file mode 100644 index 0000000000..c92ac6b5ba --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/runner/runner.go @@ -0,0 +1,289 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "net/http" + "net/url" + "os" + "os/exec" + "sort" + "strconv" + "strings" + "sync" + "time" + + "github.com/mgutz/ansi" + "github.com/weaveworks/docker/pkg/mflag" +) + +const ( + defaultSchedulerHost = "positive-cocoa-90213.appspot.com" + jsonContentType = "application/json" +) + +var ( + start = ansi.ColorCode("black+ub") + fail = ansi.ColorCode("red+b") + succ = ansi.ColorCode("green+b") + reset = ansi.ColorCode("reset") + + schedulerHost = defaultSchedulerHost + useScheduler = false + runParallel = false + verbose = false + timeout = 180 // In seconds. Three minutes ought to be enough for any test + + consoleLock = sync.Mutex{} +) + +type test struct { + name string + hosts int +} + +type schedule struct { + Tests []string `json:"tests"` +} + +type result struct { + test + errored bool + hosts []string +} + +type tests []test + +func (ts tests) Len() int { return len(ts) } +func (ts tests) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } +func (ts tests) Less(i, j int) bool { + if ts[i].hosts != ts[j].hosts { + return ts[i].hosts < ts[j].hosts + } + return ts[i].name < ts[j].name +} + +func (ts *tests) pick(available int) (test, bool) { + // pick the first test that fits in the available hosts + for i, test := range *ts { + if test.hosts <= available { + *ts = append((*ts)[:i], (*ts)[i+1:]...) 
+ return test, true + } + } + + return test{}, false +} + +func (t test) run(hosts []string) bool { + consoleLock.Lock() + fmt.Printf("%s>>> Running %s on %s%s\n", start, t.name, hosts, reset) + consoleLock.Unlock() + + var out bytes.Buffer + + cmd := exec.Command(t.name) + cmd.Env = os.Environ() + cmd.Stdout = &out + cmd.Stderr = &out + + // replace HOSTS in env + for i, env := range cmd.Env { + if strings.HasPrefix(env, "HOSTS") { + cmd.Env[i] = fmt.Sprintf("HOSTS=%s", strings.Join(hosts, " ")) + break + } + } + + start := time.Now() + var err error + + c := make(chan error, 1) + go func() { c <- cmd.Run() }() + select { + case err = <-c: + case <-time.After(time.Duration(timeout) * time.Second): + err = fmt.Errorf("timed out") + } + + duration := float64(time.Now().Sub(start)) / float64(time.Second) + + consoleLock.Lock() + if err != nil { + fmt.Printf("%s>>> Test %s finished after %0.1f secs with error: %v%s\n", fail, t.name, duration, err, reset) + } else { + fmt.Printf("%s>>> Test %s finished with success after %0.1f secs%s\n", succ, t.name, duration, reset) + } + if err != nil || verbose { + fmt.Print(out.String()) + fmt.Println() + } + consoleLock.Unlock() + + if err != nil && useScheduler { + updateScheduler(t.name, duration) + } + + return err != nil +} + +func updateScheduler(test string, duration float64) { + req := &http.Request{ + Method: "POST", + Host: schedulerHost, + URL: &url.URL{ + Opaque: fmt.Sprintf("/record/%s/%0.2f", url.QueryEscape(test), duration), + Scheme: "http", + Host: schedulerHost, + }, + Close: true, + } + if resp, err := http.DefaultClient.Do(req); err != nil { + fmt.Printf("Error updating scheduler: %v\n", err) + } else { + resp.Body.Close() + } +} + +func getSchedule(tests []string) ([]string, error) { + var ( + project = os.Getenv("CIRCLE_PROJECT_REPONAME") + buildNum = os.Getenv("CIRCLE_BUILD_NUM") + testRun = project + "-integration-" + buildNum + shardCount = os.Getenv("CIRCLE_NODE_TOTAL") + shardID = os.Getenv("CIRCLE_NODE_INDEX") + requestBody = &bytes.Buffer{} + ) + if err := json.NewEncoder(requestBody).Encode(schedule{tests}); err != nil { + return []string{}, err + } + url := fmt.Sprintf("http://%s/schedule/%s/%s/%s", schedulerHost, testRun, shardCount, shardID) + resp, err := http.Post(url, jsonContentType, requestBody) + if err != nil { + return []string{}, err + } + var sched schedule + if err := json.NewDecoder(resp.Body).Decode(&sched); err != nil { + return []string{}, err + } + return sched.Tests, nil +} + +func getTests(testNames []string) (tests, error) { + var err error + if useScheduler { + testNames, err = getSchedule(testNames) + if err != nil { + return tests{}, err + } + } + tests := tests{} + for _, name := range testNames { + parts := strings.Split(strings.TrimSuffix(name, "_test.sh"), "_") + numHosts, err := strconv.Atoi(parts[len(parts)-1]) + if err != nil { + numHosts = 1 + } + tests = append(tests, test{name, numHosts}) + fmt.Printf("Test %s needs %d hosts\n", name, numHosts) + } + return tests, nil +} + +func summary(tests, failed tests) { + if len(failed) > 0 { + fmt.Printf("%s>>> Ran %d tests, %d failed%s\n", fail, len(tests), len(failed), reset) + for _, test := range failed { + fmt.Printf("%s>>> Fail %s%s\n", fail, test.name, reset) + } + } else { + fmt.Printf("%s>>> Ran %d tests, all succeeded%s\n", succ, len(tests), reset) + } +} + +func parallel(ts tests, hosts []string) bool { + testsCopy := ts + sort.Sort(sort.Reverse(ts)) + resultsChan := make(chan result) + outstanding := 0 + failed := tests{} + for len(ts) > 0 
|| outstanding > 0 { + // While we have some free hosts, try and schedule + // a test on them + for len(hosts) > 0 { + test, ok := ts.pick(len(hosts)) + if !ok { + break + } + testHosts := hosts[:test.hosts] + hosts = hosts[test.hosts:] + + go func() { + errored := test.run(testHosts) + resultsChan <- result{test, errored, testHosts} + }() + outstanding++ + } + + // Otherwise, wait for the test to finish and return + // the hosts to the pool + result := <-resultsChan + hosts = append(hosts, result.hosts...) + outstanding-- + if result.errored { + failed = append(failed, result.test) + } + } + summary(testsCopy, failed) + return len(failed) > 0 +} + +func sequential(ts tests, hosts []string) bool { + failed := tests{} + for _, test := range ts { + if test.run(hosts) { + failed = append(failed, test) + } + } + summary(ts, failed) + return len(failed) > 0 +} + +func main() { + mflag.BoolVar(&useScheduler, []string{"scheduler"}, false, "Use scheduler to distribute tests across shards") + mflag.BoolVar(&runParallel, []string{"parallel"}, false, "Run tests in parallel on hosts where possible") + mflag.BoolVar(&verbose, []string{"v"}, false, "Print output from all tests (Also enabled via DEBUG=1)") + mflag.StringVar(&schedulerHost, []string{"scheduler-host"}, defaultSchedulerHost, "Hostname of scheduler.") + mflag.IntVar(&timeout, []string{"timeout"}, 180, "Max time to run one test for, in seconds") + mflag.Parse() + + if len(os.Getenv("DEBUG")) > 0 { + verbose = true + } + + testArgs := mflag.Args() + tests, err := getTests(testArgs) + if err != nil { + fmt.Printf("Error parsing tests: %v (%v)\n", err, testArgs) + os.Exit(1) + } + + hosts := strings.Fields(os.Getenv("HOSTS")) + maxHosts := len(hosts) + if maxHosts == 0 { + fmt.Print("No HOSTS specified.\n") + os.Exit(1) + } + + var errored bool + if runParallel { + errored = parallel(tests, hosts) + } else { + errored = sequential(tests, hosts) + } + + if errored { + os.Exit(1) + } +} diff --git a/vendor/github.com/weaveworks/common/tools/socks/main.go b/vendor/github.com/weaveworks/common/tools/socks/main.go new file mode 100644 index 0000000000..83a2149803 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/socks/main.go @@ -0,0 +1,97 @@ +package main + +import ( + "fmt" + "net" + "net/http" + "os" + "strings" + "text/template" + + socks5 "github.com/armon/go-socks5" + "github.com/weaveworks/docker/pkg/mflag" + "github.com/weaveworks/weave/common/mflagext" + "golang.org/x/net/context" +) + +type pacFileParameters struct { + HostMatch string + Aliases map[string]string +} + +const ( + pacfile = ` +function FindProxyForURL(url, host) { + if(shExpMatch(host, "{{.HostMatch}}")) { + return "SOCKS5 localhost:8000"; + } + {{range $key, $value := .Aliases}} + if (host == "{{$key}}") { + return "SOCKS5 localhost:8000"; + } + {{end}} + return "DIRECT"; +} +` +) + +func main() { + var ( + as []string + hostMatch string + ) + mflagext.ListVar(&as, []string{"a", "-alias"}, []string{}, "Specify hostname aliases in the form alias:hostname. 
Can be repeated.") + mflag.StringVar(&hostMatch, []string{"h", "-host-match"}, "*.weave.local", "Specify main host shExpMatch expression in pacfile") + mflag.Parse() + + var aliases = map[string]string{} + for _, a := range as { + parts := strings.SplitN(a, ":", 2) + if len(parts) != 2 { + fmt.Printf("'%s' is not a valid alias.\n", a) + mflag.Usage() + os.Exit(1) + } + aliases[parts[0]] = parts[1] + } + + go socksProxy(aliases) + + t := template.Must(template.New("pacfile").Parse(pacfile)) + http.HandleFunc("/proxy.pac", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/x-ns-proxy-autoconfig") + t.Execute(w, pacFileParameters{hostMatch, aliases}) + }) + + if err := http.ListenAndServe(":8080", nil); err != nil { + panic(err) + } +} + +type aliasingResolver struct { + aliases map[string]string + socks5.NameResolver +} + +func (r aliasingResolver) Resolve(ctx context.Context, name string) (context.Context, net.IP, error) { + if alias, ok := r.aliases[name]; ok { + return r.NameResolver.Resolve(ctx, alias) + } + return r.NameResolver.Resolve(ctx, name) +} + +func socksProxy(aliases map[string]string) { + conf := &socks5.Config{ + Resolver: aliasingResolver{ + aliases: aliases, + NameResolver: socks5.DNSResolver{}, + }, + } + server, err := socks5.New(conf) + if err != nil { + panic(err) + } + if err := server.ListenAndServe("tcp", ":8000"); err != nil { + panic(err) + } +} diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/cmd/shfmt/main.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/cmd/shfmt/main.go new file mode 100644 index 0000000000..08f99e0139 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/cmd/shfmt/main.go @@ -0,0 +1,202 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package main + +import ( + "bytes" + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "regexp" + "runtime/pprof" + "strings" + + "github.com/mvdan/sh/syntax" +) + +var ( + write = flag.Bool("w", false, "write result to file instead of stdout") + list = flag.Bool("l", false, "list files whose formatting differs from shfmt's") + indent = flag.Int("i", 0, "indent: 0 for tabs (default), >0 for number of spaces") + posix = flag.Bool("p", false, "parse POSIX shell code instead of bash") + cpuprofile = flag.String("cpuprofile", "", "write cpu profile to file") + + parseMode syntax.ParseMode + printConfig syntax.PrintConfig + readBuf, writeBuf bytes.Buffer + + out io.Writer +) + +func main() { + flag.Parse() + if *cpuprofile != "" { + f, err := os.Create(*cpuprofile) + if err != nil { + log.Fatal(err) + } + pprof.StartCPUProfile(f) + defer pprof.StopCPUProfile() + } + + out = os.Stdout + printConfig.Spaces = *indent + parseMode |= syntax.ParseComments + if *posix { + parseMode |= syntax.PosixConformant + } + if flag.NArg() == 0 { + if err := formatStdin(); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } + return + } + anyErr := false + onError := func(err error) { + anyErr = true + fmt.Fprintln(os.Stderr, err) + } + for _, path := range flag.Args() { + walk(path, onError) + } + if anyErr { + os.Exit(1) + } +} + +func formatStdin() error { + if *write || *list { + return fmt.Errorf("-w and -l can only be used on files") + } + readBuf.Reset() + if _, err := io.Copy(&readBuf, os.Stdin); err != nil { + return err + } + src := readBuf.Bytes() + prog, err := syntax.Parse(src, "", parseMode) + if err != nil { + return err + } + return 
printConfig.Fprint(out, prog) +} + +var ( + shellFile = regexp.MustCompile(`\.(sh|bash)$`) + validShebang = regexp.MustCompile(`^#!\s?/(usr/)?bin/(env *)?(sh|bash)`) + vcsDir = regexp.MustCompile(`^\.(git|svn|hg)$`) +) + +type shellConfidence int + +const ( + notShellFile shellConfidence = iota + ifValidShebang + isShellFile +) + +func getConfidence(info os.FileInfo) shellConfidence { + name := info.Name() + switch { + case info.IsDir(), name[0] == '.', !info.Mode().IsRegular(): + return notShellFile + case shellFile.MatchString(name): + return isShellFile + case strings.Contains(name, "."): + return notShellFile // different extension + case info.Size() < 8: + return notShellFile // cannot possibly hold valid shebang + default: + return ifValidShebang + } +} + +func walk(path string, onError func(error)) { + info, err := os.Stat(path) + if err != nil { + onError(err) + return + } + if !info.IsDir() { + if err := formatPath(path, false); err != nil { + onError(err) + } + return + } + filepath.Walk(path, func(path string, info os.FileInfo, err error) error { + if info.IsDir() && vcsDir.MatchString(info.Name()) { + return filepath.SkipDir + } + if err != nil { + onError(err) + return nil + } + conf := getConfidence(info) + if conf == notShellFile { + return nil + } + err = formatPath(path, conf == ifValidShebang) + if err != nil && !os.IsNotExist(err) { + onError(err) + } + return nil + }) +} + +func empty(f *os.File) error { + if err := f.Truncate(0); err != nil { + return err + } + _, err := f.Seek(0, 0) + return err +} + +func formatPath(path string, checkShebang bool) error { + openMode := os.O_RDONLY + if *write { + openMode = os.O_RDWR + } + f, err := os.OpenFile(path, openMode, 0) + if err != nil { + return err + } + defer f.Close() + readBuf.Reset() + if _, err := io.Copy(&readBuf, f); err != nil { + return err + } + src := readBuf.Bytes() + if checkShebang && !validShebang.Match(src[:32]) { + return nil + } + prog, err := syntax.Parse(src, path, parseMode) + if err != nil { + return err + } + writeBuf.Reset() + printConfig.Fprint(&writeBuf, prog) + res := writeBuf.Bytes() + if !bytes.Equal(src, res) { + if *list { + fmt.Fprintln(out, path) + } + if *write { + if err := empty(f); err != nil { + return err + } + if _, err := f.Write(res); err != nil { + return err + } + } + } + if !*list && !*write { + if _, err := out.Write(res); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/doc.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/doc.go new file mode 100644 index 0000000000..eff8c2fecc --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/doc.go @@ -0,0 +1,6 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +// Package syntax implements parsing and formatting of shell programs. +// It supports both POSIX Shell and Bash. 
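+//
+// A minimal sketch of parsing and re-printing a script, mirroring how the shfmt command
+// above uses this package (the source string is illustrative):
+//
+//    src := []byte("if true;then echo hi;fi")
+//    prog, err := syntax.Parse(src, "stdin", syntax.ParseComments)
+//    if err != nil {
+//        log.Fatal(err)
+//    }
+//    var cfg syntax.PrintConfig
+//    cfg.Fprint(os.Stdout, prog) // writes the formatted program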
+package syntax diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/lexer.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/lexer.go new file mode 100644 index 0000000000..8b5cbd3a14 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/lexer.go @@ -0,0 +1,962 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package syntax + +import ( + "bytes" +) + +// bytes that form or start a token +func regOps(b byte) bool { + return b == ';' || b == '"' || b == '\'' || b == '(' || + b == ')' || b == '$' || b == '|' || b == '&' || + b == '>' || b == '<' || b == '`' +} + +// tokenize these inside parameter expansions +func paramOps(b byte) bool { + return b == '}' || b == '#' || b == ':' || b == '-' || b == '+' || + b == '=' || b == '?' || b == '%' || b == '[' || b == '/' || + b == '^' || b == ',' +} + +// tokenize these inside arithmetic expansions +func arithmOps(b byte) bool { + return b == '+' || b == '-' || b == '!' || b == '*' || + b == '/' || b == '%' || b == '(' || b == ')' || + b == '^' || b == '<' || b == '>' || b == ':' || + b == '=' || b == ',' || b == '?' || b == '|' || + b == '&' +} + +func wordBreak(b byte) bool { + return b == ' ' || b == '\t' || b == '\r' || b == '\n' || + b == '&' || b == '>' || b == '<' || b == '|' || + b == ';' || b == '(' || b == ')' +} + +func (p *parser) next() { + if p.tok == _EOF || p.npos >= len(p.src) { + p.tok = _EOF + return + } + p.spaced, p.newLine = false, false + b, q := p.src[p.npos], p.quote + p.pos = Pos(p.npos + 1) + switch q { + case hdocWord: + if wordBreak(b) { + p.tok = illegalTok + p.spaced = true + return + } + case paramExpRepl: + switch b { + case '}': + p.npos++ + p.tok = rightBrace + case '/': + p.npos++ + p.tok = Quo + case '`', '"', '$': + p.tok = p.dqToken(b) + default: + p.advanceLitOther(q) + } + return + case dblQuotes: + if b == '`' || b == '"' || b == '$' { + p.tok = p.dqToken(b) + } else { + p.advanceLitDquote() + } + return + case hdocBody, hdocBodyTabs: + if b == '`' || b == '$' { + p.tok = p.dqToken(b) + } else if p.hdocStop == nil { + p.tok = illegalTok + } else { + p.advanceLitHdoc() + } + return + case paramExpExp: + switch b { + case '}': + p.npos++ + p.tok = rightBrace + case '`', '"', '$': + p.tok = p.dqToken(b) + default: + p.advanceLitOther(q) + } + return + case sglQuotes: + if b == '\'' { + p.npos++ + p.tok = sglQuote + } else { + p.advanceLitOther(q) + } + return + } +skipSpace: + for { + switch b { + case ' ', '\t', '\r': + p.spaced = true + p.npos++ + case '\n': + if p.quote == arithmExprLet { + p.tok = illegalTok + p.newLine, p.spaced = true, true + return + } + p.spaced = true + if p.npos < len(p.src) { + p.npos++ + } + p.f.Lines = append(p.f.Lines, p.npos) + p.newLine = true + if len(p.heredocs) > p.buriedHdocs { + p.doHeredocs() + if p.tok == _EOF { + return + } + } + case '\\': + if p.npos < len(p.src)-1 && p.src[p.npos+1] == '\n' { + p.npos += 2 + p.f.Lines = append(p.f.Lines, p.npos) + } else { + break skipSpace + } + default: + break skipSpace + } + if p.npos >= len(p.src) { + p.tok = _EOF + return + } + b = p.src[p.npos] + } + p.pos = Pos(p.npos + 1) + switch { + case q&allRegTokens != 0: + switch b { + case ';', '"', '\'', '(', ')', '$', '|', '&', '>', '<', '`': + p.tok = p.regToken(b) + case '#': + p.npos++ + bs, _ := p.readUntil('\n') + p.npos += len(bs) + if p.mode&ParseComments > 0 { + p.f.Comments = append(p.f.Comments, &Comment{ + Hash: p.pos, + Text: 
string(bs), + }) + } + p.next() + case '?', '*', '+', '@', '!': + if p.bash() && p.npos+1 < len(p.src) && p.src[p.npos+1] == '(' { + switch b { + case '?': + p.tok = globQuest + case '*': + p.tok = globMul + case '+': + p.tok = globAdd + case '@': + p.tok = globAt + default: // '!' + p.tok = globNot + } + p.npos += 2 + } else { + p.advanceLitNone() + } + default: + p.advanceLitNone() + } + case q == paramExpName && paramOps(b): + p.tok = p.paramToken(b) + case q&allArithmExpr != 0 && arithmOps(b): + p.tok = p.arithmToken(b) + case q&allRbrack != 0 && b == ']': + p.npos++ + p.tok = rightBrack + case q == testRegexp: + p.advanceLitRe() + case regOps(b): + p.tok = p.regToken(b) + default: + p.advanceLitOther(q) + } +} + +func byteAt(src []byte, i int) byte { + if i >= len(src) { + return 0 + } + return src[i] +} + +func (p *parser) regToken(b byte) Token { + switch b { + case '\'': + p.npos++ + return sglQuote + case '"': + p.npos++ + return dblQuote + case '`': + p.npos++ + return bckQuote + case '&': + switch byteAt(p.src, p.npos+1) { + case '&': + p.npos += 2 + return AndExpr + case '>': + if !p.bash() { + break + } + if byteAt(p.src, p.npos+2) == '>' { + p.npos += 3 + return appAll + } + p.npos += 2 + return rdrAll + } + p.npos++ + return And + case '|': + switch byteAt(p.src, p.npos+1) { + case '|': + p.npos += 2 + return OrExpr + case '&': + if !p.bash() { + break + } + p.npos += 2 + return pipeAll + } + p.npos++ + return Or + case '$': + switch byteAt(p.src, p.npos+1) { + case '\'': + if !p.bash() { + break + } + p.npos += 2 + return dollSglQuote + case '"': + if !p.bash() { + break + } + p.npos += 2 + return dollDblQuote + case '{': + p.npos += 2 + return dollBrace + case '[': + if !p.bash() { + break + } + p.npos += 2 + return dollBrack + case '(': + if byteAt(p.src, p.npos+2) == '(' { + p.npos += 3 + return dollDblParen + } + p.npos += 2 + return dollParen + } + p.npos++ + return dollar + case '(': + if p.bash() && byteAt(p.src, p.npos+1) == '(' { + p.npos += 2 + return dblLeftParen + } + p.npos++ + return leftParen + case ')': + p.npos++ + return rightParen + case ';': + switch byteAt(p.src, p.npos+1) { + case ';': + if p.bash() && byteAt(p.src, p.npos+2) == '&' { + p.npos += 3 + return dblSemiFall + } + p.npos += 2 + return dblSemicolon + case '&': + if !p.bash() { + break + } + p.npos += 2 + return semiFall + } + p.npos++ + return semicolon + case '<': + switch byteAt(p.src, p.npos+1) { + case '<': + if b := byteAt(p.src, p.npos+2); b == '-' { + p.npos += 3 + return dashHdoc + } else if p.bash() && b == '<' { + p.npos += 3 + return wordHdoc + } + p.npos += 2 + return Shl + case '>': + p.npos += 2 + return rdrInOut + case '&': + p.npos += 2 + return dplIn + case '(': + if !p.bash() { + break + } + p.npos += 2 + return cmdIn + } + p.npos++ + return Lss + default: // '>' + switch byteAt(p.src, p.npos+1) { + case '>': + p.npos += 2 + return Shr + case '&': + p.npos += 2 + return dplOut + case '|': + p.npos += 2 + return clbOut + case '(': + if !p.bash() { + break + } + p.npos += 2 + return cmdOut + } + p.npos++ + return Gtr + } +} + +func (p *parser) dqToken(b byte) Token { + switch b { + case '"': + p.npos++ + return dblQuote + case '`': + p.npos++ + return bckQuote + default: // '$' + switch byteAt(p.src, p.npos+1) { + case '{': + p.npos += 2 + return dollBrace + case '[': + if !p.bash() { + break + } + p.npos += 2 + return dollBrack + case '(': + if byteAt(p.src, p.npos+2) == '(' { + p.npos += 3 + return dollDblParen + } + p.npos += 2 + return dollParen + } + p.npos++ + return 
dollar + } +} + +func (p *parser) paramToken(b byte) Token { + switch b { + case '}': + p.npos++ + return rightBrace + case ':': + switch byteAt(p.src, p.npos+1) { + case '+': + p.npos += 2 + return ColAdd + case '-': + p.npos += 2 + return ColSub + case '?': + p.npos += 2 + return ColQuest + case '=': + p.npos += 2 + return ColAssgn + } + p.npos++ + return Colon + case '+': + p.npos++ + return Add + case '-': + p.npos++ + return Sub + case '?': + p.npos++ + return Quest + case '=': + p.npos++ + return Assgn + case '%': + if byteAt(p.src, p.npos+1) == '%' { + p.npos += 2 + return dblRem + } + p.npos++ + return Rem + case '#': + if byteAt(p.src, p.npos+1) == '#' { + p.npos += 2 + return dblHash + } + p.npos++ + return Hash + case '[': + p.npos++ + return leftBrack + case '^': + if byteAt(p.src, p.npos+1) == '^' { + p.npos += 2 + return dblXor + } + p.npos++ + return Xor + case ',': + if byteAt(p.src, p.npos+1) == ',' { + p.npos += 2 + return dblComma + } + p.npos++ + return Comma + default: // '/' + if byteAt(p.src, p.npos+1) == '/' { + p.npos += 2 + return dblQuo + } + p.npos++ + return Quo + } +} + +func (p *parser) arithmToken(b byte) Token { + switch b { + case '!': + if byteAt(p.src, p.npos+1) == '=' { + p.npos += 2 + return Neq + } + p.npos++ + return Not + case '=': + if byteAt(p.src, p.npos+1) == '=' { + p.npos += 2 + return Eql + } + p.npos++ + return Assgn + case '(': + p.npos++ + return leftParen + case ')': + p.npos++ + return rightParen + case '&': + switch byteAt(p.src, p.npos+1) { + case '&': + p.npos += 2 + return AndExpr + case '=': + p.npos += 2 + return AndAssgn + } + p.npos++ + return And + case '|': + switch byteAt(p.src, p.npos+1) { + case '|': + p.npos += 2 + return OrExpr + case '=': + p.npos += 2 + return OrAssgn + } + p.npos++ + return Or + case '<': + switch byteAt(p.src, p.npos+1) { + case '<': + if byteAt(p.src, p.npos+2) == '=' { + p.npos += 3 + return ShlAssgn + } + p.npos += 2 + return Shl + case '=': + p.npos += 2 + return Leq + } + p.npos++ + return Lss + case '>': + switch byteAt(p.src, p.npos+1) { + case '>': + if byteAt(p.src, p.npos+2) == '=' { + p.npos += 3 + return ShrAssgn + } + p.npos += 2 + return Shr + case '=': + p.npos += 2 + return Geq + } + p.npos++ + return Gtr + case '+': + switch byteAt(p.src, p.npos+1) { + case '+': + p.npos += 2 + return Inc + case '=': + p.npos += 2 + return AddAssgn + } + p.npos++ + return Add + case '-': + switch byteAt(p.src, p.npos+1) { + case '-': + p.npos += 2 + return Dec + case '=': + p.npos += 2 + return SubAssgn + } + p.npos++ + return Sub + case '%': + if byteAt(p.src, p.npos+1) == '=' { + p.npos += 2 + return RemAssgn + } + p.npos++ + return Rem + case '*': + switch byteAt(p.src, p.npos+1) { + case '*': + p.npos += 2 + return Pow + case '=': + p.npos += 2 + return MulAssgn + } + p.npos++ + return Mul + case '/': + if byteAt(p.src, p.npos+1) == '=' { + p.npos += 2 + return QuoAssgn + } + p.npos++ + return Quo + case '^': + if byteAt(p.src, p.npos+1) == '=' { + p.npos += 2 + return XorAssgn + } + p.npos++ + return Xor + case ',': + p.npos++ + return Comma + case '?': + p.npos++ + return Quest + default: // ':' + p.npos++ + return Colon + } +} + +func (p *parser) advanceLitOther(q quoteState) { + bs := p.litBuf[:0] + for { + if p.npos >= len(p.src) { + p.tok, p.val = _LitWord, string(bs) + return + } + b := p.src[p.npos] + switch { + case b == '\\': // escaped byte follows + if p.npos == len(p.src)-1 { + p.npos++ + bs = append(bs, '\\') + p.tok, p.val = _LitWord, string(bs) + return + } + b = p.src[p.npos+1] + 
p.npos += 2 + if b == '\n' { + p.f.Lines = append(p.f.Lines, p.npos) + } else { + bs = append(bs, '\\', b) + } + continue + case q == sglQuotes: + switch b { + case '\n': + p.f.Lines = append(p.f.Lines, p.npos+1) + case '\'': + p.tok, p.val = _LitWord, string(bs) + return + } + case b == '`', b == '$': + p.tok, p.val = _Lit, string(bs) + return + case q == paramExpExp: + if b == '}' { + p.tok, p.val = _LitWord, string(bs) + return + } else if b == '"' { + p.tok, p.val = _Lit, string(bs) + return + } + case q == paramExpRepl: + if b == '}' || b == '/' { + p.tok, p.val = _LitWord, string(bs) + return + } + case wordBreak(b), regOps(b), q&allArithmExpr != 0 && arithmOps(b), + q == paramExpName && paramOps(b), + q&allRbrack != 0 && b == ']': + p.tok, p.val = _LitWord, string(bs) + return + } + bs = append(bs, p.src[p.npos]) + p.npos++ + } +} + +func (p *parser) advanceLitNone() { + var i int + tok := _Lit + p.asPos = 0 +loop: + for i = p.npos; i < len(p.src); i++ { + switch p.src[i] { + case '\\': // escaped byte follows + if i == len(p.src)-1 { + break + } + if i++; p.src[i] == '\n' { + p.f.Lines = append(p.f.Lines, i+1) + bs := p.src[p.npos : i-1] + p.npos = i + 1 + p.advanceLitNoneCont(bs) + return + } + case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')': + tok = _LitWord + break loop + case '?', '*', '+', '@', '!': + if p.bash() && i+1 < len(p.src) && p.src[i+1] == '(' { + break loop + } + case '`': + if p.quote == subCmdBckquo { + tok = _LitWord + } + break loop + case '"', '\'', '$': + break loop + case '=': + p.asPos = i - p.npos + if p.bash() && p.asPos > 0 && p.src[p.npos+p.asPos-1] == '+' { + p.asPos-- // a+=b + } + } + } + if i == len(p.src) { + tok = _LitWord + } + p.tok, p.val = tok, string(p.src[p.npos:i]) + p.npos = i +} + +func (p *parser) advanceLitNoneCont(bs []byte) { + for { + if p.npos >= len(p.src) { + p.tok, p.val = _LitWord, string(bs) + return + } + switch p.src[p.npos] { + case '\\': // escaped byte follows + if p.npos == len(p.src)-1 { + p.npos++ + bs = append(bs, '\\') + p.tok, p.val = _LitWord, string(bs) + return + } + b := p.src[p.npos+1] + p.npos += 2 + if b == '\n' { + p.f.Lines = append(p.f.Lines, p.npos) + } else { + bs = append(bs, '\\', b) + } + case ' ', '\t', '\n', '\r', '&', '>', '<', '|', ';', '(', ')': + p.tok, p.val = _LitWord, string(bs) + return + case '`': + if p.quote == subCmdBckquo { + p.tok, p.val = _LitWord, string(bs) + return + } + fallthrough + case '"', '\'', '$': + p.tok, p.val = _Lit, string(bs) + return + default: + bs = append(bs, p.src[p.npos]) + p.npos++ + } + } +} + +func (p *parser) advanceLitDquote() { + var i int + tok := _Lit +loop: + for i = p.npos; i < len(p.src); i++ { + switch p.src[i] { + case '\\': // escaped byte follows + if i == len(p.src)-1 { + break + } + if i++; p.src[i] == '\n' { + p.f.Lines = append(p.f.Lines, i+1) + } + case '"': + tok = _LitWord + break loop + case '`', '$': + break loop + case '\n': + p.f.Lines = append(p.f.Lines, i+1) + } + } + p.tok, p.val = tok, string(p.src[p.npos:i]) + p.npos = i +} + +func (p *parser) isHdocEnd(i int) bool { + end := p.hdocStop + if end == nil || len(p.src) < i+len(end) { + return false + } + if !bytes.Equal(end, p.src[i:i+len(end)]) { + return false + } + return len(p.src) == i+len(end) || p.src[i+len(end)] == '\n' +} + +func (p *parser) advanceLitHdoc() { + n := p.npos + if p.quote == hdocBodyTabs { + for n < len(p.src) && p.src[n] == '\t' { + n++ + } + } + if p.isHdocEnd(n) { + if n > p.npos { + p.tok, p.val = _LitWord, string(p.src[p.npos:n]) + } + p.npos = 
n + len(p.hdocStop) + p.hdocStop = nil + return + } + var i int +loop: + for i = p.npos; i < len(p.src); i++ { + switch p.src[i] { + case '\\': // escaped byte follows + if i++; i == len(p.src) { + break loop + } + if p.src[i] == '\n' { + p.f.Lines = append(p.f.Lines, i+1) + } + case '`', '$': + break loop + case '\n': + n := i + 1 + p.f.Lines = append(p.f.Lines, n) + if p.quote == hdocBodyTabs { + for n < len(p.src) && p.src[n] == '\t' { + n++ + } + } + if p.isHdocEnd(n) { + p.tok, p.val = _LitWord, string(p.src[p.npos:n]) + p.npos = n + len(p.hdocStop) + p.hdocStop = nil + return + } + } + } + p.tok, p.val = _Lit, string(p.src[p.npos:i]) + p.npos = i +} + +func (p *parser) hdocLitWord() Word { + pos := p.npos + end := pos + for p.npos < len(p.src) { + end = p.npos + bs, found := p.readUntil('\n') + p.npos += len(bs) + 1 + if found { + p.f.Lines = append(p.f.Lines, p.npos) + } + if p.quote == hdocBodyTabs { + for end < len(p.src) && p.src[end] == '\t' { + end++ + } + } + if p.isHdocEnd(end) { + break + } + } + if p.npos == len(p.src) { + end = p.npos + } + l := p.lit(Pos(pos+1), string(p.src[pos:end])) + return Word{Parts: p.singleWps(l)} +} + +func (p *parser) readUntil(b byte) ([]byte, bool) { + rem := p.src[p.npos:] + if i := bytes.IndexByte(rem, b); i >= 0 { + return rem[:i], true + } + return rem, false +} + +func (p *parser) advanceLitRe() { + end := bytes.Index(p.src[p.npos:], []byte(" ]]")) + p.tok = _LitWord + if end == -1 { + p.val = string(p.src[p.npos:]) + p.npos = len(p.src) + return + } + p.val = string(p.src[p.npos : p.npos+end]) + p.npos += end +} + +func testUnaryOp(val string) Token { + switch val { + case "!": + return Not + case "-e", "-a": + return tsExists + case "-f": + return tsRegFile + case "-d": + return tsDirect + case "-c": + return tsCharSp + case "-b": + return tsBlckSp + case "-p": + return tsNmPipe + case "-S": + return tsSocket + case "-L", "-h": + return tsSmbLink + case "-g": + return tsGIDSet + case "-u": + return tsUIDSet + case "-r": + return tsRead + case "-w": + return tsWrite + case "-x": + return tsExec + case "-s": + return tsNoEmpty + case "-t": + return tsFdTerm + case "-z": + return tsEmpStr + case "-n": + return tsNempStr + case "-o": + return tsOptSet + case "-v": + return tsVarSet + case "-R": + return tsRefVar + default: + return illegalTok + } +} + +func testBinaryOp(val string) Token { + switch val { + case "=": + return Assgn + case "==": + return Eql + case "!=": + return Neq + case "=~": + return tsReMatch + case "-nt": + return tsNewer + case "-ot": + return tsOlder + case "-ef": + return tsDevIno + case "-eq": + return tsEql + case "-ne": + return tsNeq + case "-le": + return tsLeq + case "-ge": + return tsGeq + case "-lt": + return tsLss + case "-gt": + return tsGtr + default: + return illegalTok + } +} diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/nodes.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/nodes.go new file mode 100644 index 0000000000..786cafc4b2 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/nodes.go @@ -0,0 +1,717 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package syntax + +// Node represents an AST node. +type Node interface { + // Pos returns the first character of the node + Pos() Pos + // End returns the character immediately after the node + End() Pos +} + +// File is a shell program. 
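+//
+// The Lines offsets are what allow Position to translate a Pos, which is a
+// 1-based byte offset into the source, back into a line and column pair.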
+type File struct { + Name string + + Stmts []*Stmt + Comments []*Comment + + // Lines contains the offset of the first character for each + // line (the first entry is always 0) + Lines []int +} + +func (f *File) Pos() Pos { return stmtFirstPos(f.Stmts) } +func (f *File) End() Pos { return stmtLastEnd(f.Stmts) } + +func (f *File) Position(p Pos) (pos Position) { + intp := int(p) + pos.Offset = intp - 1 + if i := searchInts(f.Lines, intp); i >= 0 { + pos.Line, pos.Column = i+1, intp-f.Lines[i] + } + return +} + +// Inlined version of: +// sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1 +func searchInts(a []int, x int) int { + i, j := 0, len(a) + for i < j { + h := i + (j-i)/2 + if a[h] <= x { + i = h + 1 + } else { + j = h + } + } + return i - 1 +} + +func posMax(p1, p2 Pos) Pos { + if p2 > p1 { + return p2 + } + return p1 +} + +// Comment represents a single comment on a single line. +type Comment struct { + Hash Pos + Text string +} + +func (c *Comment) Pos() Pos { return c.Hash } +func (c *Comment) End() Pos { return posAddStr(c.Hash, c.Text) } + +// Stmt represents a statement, otherwise known as a compound command. +// It is compromised of a command and other components that may come +// before or after it. +type Stmt struct { + Cmd Command + Position Pos + SemiPos Pos + Negated bool + Background bool + Assigns []*Assign + Redirs []*Redirect +} + +func (s *Stmt) Pos() Pos { return s.Position } +func (s *Stmt) End() Pos { + if s.SemiPos > 0 { + return s.SemiPos + 1 + } + end := s.Position + if s.Negated { + end = posAdd(end, 1) + } + if s.Cmd != nil { + end = s.Cmd.End() + } + if len(s.Assigns) > 0 { + assEnd := s.Assigns[len(s.Assigns)-1].End() + end = posMax(end, assEnd) + } + if len(s.Redirs) > 0 { + redEnd := s.Redirs[len(s.Redirs)-1].End() + end = posMax(end, redEnd) + } + return end +} + +// Command represents all nodes that are simple commands, which are +// directly placed in a Stmt. +type Command interface { + Node + commandNode() +} + +func (*CallExpr) commandNode() {} +func (*IfClause) commandNode() {} +func (*WhileClause) commandNode() {} +func (*UntilClause) commandNode() {} +func (*ForClause) commandNode() {} +func (*CaseClause) commandNode() {} +func (*Block) commandNode() {} +func (*Subshell) commandNode() {} +func (*BinaryCmd) commandNode() {} +func (*FuncDecl) commandNode() {} +func (*ArithmCmd) commandNode() {} +func (*TestClause) commandNode() {} +func (*DeclClause) commandNode() {} +func (*EvalClause) commandNode() {} +func (*LetClause) commandNode() {} +func (*CoprocClause) commandNode() {} + +// Assign represents an assignment to a variable. +type Assign struct { + Append bool + Name *Lit + Value Word +} + +func (a *Assign) Pos() Pos { + if a.Name == nil { + return a.Value.Pos() + } + return a.Name.Pos() +} + +func (a *Assign) End() Pos { + if a.Name != nil { + return posMax(a.Name.End(), a.Value.End()) + } + return a.Value.End() +} + +// Redirect represents an input/output redirection. +type Redirect struct { + OpPos Pos + Op RedirOperator + N *Lit + Word, Hdoc Word +} + +func (r *Redirect) Pos() Pos { + if r.N != nil { + return r.N.Pos() + } + return r.OpPos +} +func (r *Redirect) End() Pos { return r.Word.End() } + +// CallExpr represents a command execution or function call. +type CallExpr struct { + Args []Word +} + +func (c *CallExpr) Pos() Pos { return c.Args[0].Pos() } +func (c *CallExpr) End() Pos { return c.Args[len(c.Args)-1].End() } + +// Subshell represents a series of commands that should be executed in a +// nested shell environment. 
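+// For example, "(cd /tmp; ls)" parses as a single statement whose Cmd is a
+// Subshell holding the two inner statements.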
+type Subshell struct { + Lparen, Rparen Pos + Stmts []*Stmt +} + +func (s *Subshell) Pos() Pos { return s.Lparen } +func (s *Subshell) End() Pos { return posAdd(s.Rparen, 1) } + +// Block represents a series of commands that should be executed in a +// nested scope. +type Block struct { + Lbrace, Rbrace Pos + Stmts []*Stmt +} + +func (b *Block) Pos() Pos { return b.Rbrace } +func (b *Block) End() Pos { return posAdd(b.Rbrace, 1) } + +// IfClause represents an if statement. +type IfClause struct { + If, Then, Fi Pos + CondStmts []*Stmt + ThenStmts []*Stmt + Elifs []*Elif + Else Pos + ElseStmts []*Stmt +} + +func (c *IfClause) Pos() Pos { return c.If } +func (c *IfClause) End() Pos { return posAdd(c.Fi, 2) } + +// Elif represents an "else if" case in an if clause. +type Elif struct { + Elif, Then Pos + CondStmts []*Stmt + ThenStmts []*Stmt +} + +// WhileClause represents a while clause. +type WhileClause struct { + While, Do, Done Pos + CondStmts []*Stmt + DoStmts []*Stmt +} + +func (w *WhileClause) Pos() Pos { return w.While } +func (w *WhileClause) End() Pos { return posAdd(w.Done, 4) } + +// UntilClause represents an until clause. +type UntilClause struct { + Until, Do, Done Pos + CondStmts []*Stmt + DoStmts []*Stmt +} + +func (u *UntilClause) Pos() Pos { return u.Until } +func (u *UntilClause) End() Pos { return posAdd(u.Done, 4) } + +// ForClause represents a for clause. +type ForClause struct { + For, Do, Done Pos + Loop Loop + DoStmts []*Stmt +} + +func (f *ForClause) Pos() Pos { return f.For } +func (f *ForClause) End() Pos { return posAdd(f.Done, 4) } + +// Loop represents all nodes that can be loops in a for clause. +type Loop interface { + Node + loopNode() +} + +func (*WordIter) loopNode() {} +func (*CStyleLoop) loopNode() {} + +// WordIter represents the iteration of a variable over a series of +// words in a for clause. +type WordIter struct { + Name Lit + List []Word +} + +func (w *WordIter) Pos() Pos { return w.Name.Pos() } +func (w *WordIter) End() Pos { return posMax(w.Name.End(), wordLastEnd(w.List)) } + +// CStyleLoop represents the behaviour of a for clause similar to the C +// language. +// +// This node will never appear when in PosixConformant mode. +type CStyleLoop struct { + Lparen, Rparen Pos + Init, Cond, Post ArithmExpr +} + +func (c *CStyleLoop) Pos() Pos { return c.Lparen } +func (c *CStyleLoop) End() Pos { return posAdd(c.Rparen, 2) } + +// BinaryCmd represents a binary expression between two statements. +type BinaryCmd struct { + OpPos Pos + Op BinCmdOperator + X, Y *Stmt +} + +func (b *BinaryCmd) Pos() Pos { return b.X.Pos() } +func (b *BinaryCmd) End() Pos { return b.Y.End() } + +// FuncDecl represents the declaration of a function. +type FuncDecl struct { + Position Pos + BashStyle bool + Name Lit + Body *Stmt +} + +func (f *FuncDecl) Pos() Pos { return f.Position } +func (f *FuncDecl) End() Pos { return f.Body.End() } + +// Word represents a list of nodes that are contiguous to each other. +// The word is delimeted by word boundaries. +type Word struct { + Parts []WordPart +} + +func (w *Word) Pos() Pos { return partsFirstPos(w.Parts) } +func (w *Word) End() Pos { return partsLastEnd(w.Parts) } + +// WordPart represents all nodes that can form a word. 
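+// For example, the word foo"$bar"$(baz) is built from three parts: a Lit,
+// a DblQuoted containing a ParamExp, and a CmdSubst.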
+type WordPart interface { + Node + wordPartNode() +} + +func (*Lit) wordPartNode() {} +func (*SglQuoted) wordPartNode() {} +func (*DblQuoted) wordPartNode() {} +func (*ParamExp) wordPartNode() {} +func (*CmdSubst) wordPartNode() {} +func (*ArithmExp) wordPartNode() {} +func (*ProcSubst) wordPartNode() {} +func (*ArrayExpr) wordPartNode() {} +func (*ExtGlob) wordPartNode() {} + +// Lit represents an unquoted string consisting of characters that were +// not tokenized. +type Lit struct { + ValuePos Pos + Value string +} + +func (l *Lit) Pos() Pos { return l.ValuePos } +func (l *Lit) End() Pos { return posAddStr(l.ValuePos, l.Value) } + +// SglQuoted represents a string within single quotes. +type SglQuoted struct { + Position Pos + Dollar bool + Value string +} + +func (q *SglQuoted) Pos() Pos { return q.Position } +func (q *SglQuoted) End() Pos { + pos := posAdd(q.Position, 2+len(q.Value)) + if pos > 0 && q.Dollar { + pos++ + } + return pos +} + +// DblQuoted represents a list of nodes within double quotes. +type DblQuoted struct { + Position Pos + Dollar bool + Parts []WordPart +} + +func (q *DblQuoted) Pos() Pos { return q.Position } +func (q *DblQuoted) End() Pos { + if q.Position == 0 { + return defaultPos + } + end := q.Position + if len(q.Parts) > 0 { + end = partsLastEnd(q.Parts) + } else if q.Dollar { + end += 2 + } else { + end++ + } + return posAdd(end, 1) +} + +// CmdSubst represents a command substitution. +type CmdSubst struct { + Left, Right Pos + Stmts []*Stmt +} + +func (c *CmdSubst) Pos() Pos { return c.Left } +func (c *CmdSubst) End() Pos { return posAdd(c.Right, 1) } + +// ParamExp represents a parameter expansion. +type ParamExp struct { + Dollar, Rbrace Pos + Short, Length bool + Param Lit + Ind *Index + Slice *Slice + Repl *Replace + Exp *Expansion +} + +func (p *ParamExp) Pos() Pos { return p.Dollar } +func (p *ParamExp) End() Pos { + if p.Rbrace > 0 { + return p.Rbrace + 1 + } + return p.Param.End() +} + +// Index represents access to an array via an index inside a ParamExp. +// +// This node will never appear when in PosixConformant mode. +type Index struct { + Word Word +} + +// Slice represents character slicing inside a ParamExp. +// +// This node will never appear when in PosixConformant mode. +type Slice struct { + Offset, Length Word +} + +// Replace represents a search and replace inside a ParamExp. +type Replace struct { + All bool + Orig, With Word +} + +// Expansion represents string manipulation in a ParamExp other than +// those covered by Replace. +type Expansion struct { + Op ParExpOperator + Word Word +} + +// ArithmExp represents an arithmetic expansion. +type ArithmExp struct { + Left, Right Pos + Bracket bool + X ArithmExpr +} + +func (a *ArithmExp) Pos() Pos { return a.Left } +func (a *ArithmExp) End() Pos { + if a.Bracket { + return posAdd(a.Right, 1) + } + return posAdd(a.Right, 2) +} + +// ArithmCmd represents an arithmetic command. +// +// This node will never appear when in PosixConformant mode. +type ArithmCmd struct { + Left, Right Pos + X ArithmExpr +} + +func (a *ArithmCmd) Pos() Pos { return a.Left } +func (a *ArithmCmd) End() Pos { return posAdd(a.Right, 2) } + +// ArithmExpr represents all nodes that form arithmetic expressions. +type ArithmExpr interface { + Node + arithmExprNode() +} + +func (*BinaryArithm) arithmExprNode() {} +func (*UnaryArithm) arithmExprNode() {} +func (*ParenArithm) arithmExprNode() {} +func (*Word) arithmExprNode() {} + +// BinaryArithm represents a binary expression between two arithmetic +// expression. 
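+// Operator precedence is encoded in the tree: in $((x + y * 2)) the root is
+// a BinaryArithm for +, whose right operand is another BinaryArithm for *.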
+type BinaryArithm struct { + OpPos Pos + Op Token + X, Y ArithmExpr +} + +func (b *BinaryArithm) Pos() Pos { return b.X.Pos() } +func (b *BinaryArithm) End() Pos { return b.Y.End() } + +// UnaryArithm represents an unary expression over a node, either before +// or after it. +type UnaryArithm struct { + OpPos Pos + Op Token + Post bool + X ArithmExpr +} + +func (u *UnaryArithm) Pos() Pos { + if u.Post { + return u.X.Pos() + } + return u.OpPos +} + +func (u *UnaryArithm) End() Pos { + if u.Post { + return posAdd(u.OpPos, 2) + } + return u.X.End() +} + +// ParenArithm represents an expression within parentheses inside an +// ArithmExp. +type ParenArithm struct { + Lparen, Rparen Pos + X ArithmExpr +} + +func (p *ParenArithm) Pos() Pos { return p.Lparen } +func (p *ParenArithm) End() Pos { return posAdd(p.Rparen, 1) } + +// CaseClause represents a case (switch) clause. +type CaseClause struct { + Case, Esac Pos + Word Word + List []*PatternList +} + +func (c *CaseClause) Pos() Pos { return c.Case } +func (c *CaseClause) End() Pos { return posAdd(c.Esac, 4) } + +// PatternList represents a pattern list (case) within a CaseClause. +type PatternList struct { + Op CaseOperator + OpPos Pos + Patterns []Word + Stmts []*Stmt +} + +// TestClause represents a Bash extended test clause. +// +// This node will never appear when in PosixConformant mode. +type TestClause struct { + Left, Right Pos + X TestExpr +} + +func (t *TestClause) Pos() Pos { return t.Left } +func (t *TestClause) End() Pos { return posAdd(t.Right, 2) } + +// TestExpr represents all nodes that form arithmetic expressions. +type TestExpr interface { + Node + testExprNode() +} + +func (*BinaryTest) testExprNode() {} +func (*UnaryTest) testExprNode() {} +func (*ParenTest) testExprNode() {} +func (*Word) testExprNode() {} + +// BinaryTest represents a binary expression between two arithmetic +// expression. +type BinaryTest struct { + OpPos Pos + Op BinTestOperator + X, Y TestExpr +} + +func (b *BinaryTest) Pos() Pos { return b.X.Pos() } +func (b *BinaryTest) End() Pos { return b.Y.End() } + +// UnaryTest represents an unary expression over a node, either before +// or after it. +type UnaryTest struct { + OpPos Pos + Op UnTestOperator + X TestExpr +} + +func (u *UnaryTest) Pos() Pos { return u.OpPos } +func (u *UnaryTest) End() Pos { return u.X.End() } + +// ParenTest represents an expression within parentheses inside an +// TestExp. +type ParenTest struct { + Lparen, Rparen Pos + X TestExpr +} + +func (p *ParenTest) Pos() Pos { return p.Lparen } +func (p *ParenTest) End() Pos { return posAdd(p.Rparen, 1) } + +// DeclClause represents a Bash declare clause. +// +// This node will never appear when in PosixConformant mode. +type DeclClause struct { + Position Pos + Variant string + Opts []Word + Assigns []*Assign +} + +func (d *DeclClause) Pos() Pos { return d.Position } +func (d *DeclClause) End() Pos { + end := wordLastEnd(d.Opts) + if len(d.Assigns) > 0 { + assignEnd := d.Assigns[len(d.Assigns)-1].End() + end = posMax(end, assignEnd) + } + return end +} + +// ArrayExpr represents a Bash array expression. +// +// This node will never appear when in PosixConformant mode. +type ArrayExpr struct { + Lparen, Rparen Pos + List []Word +} + +func (a *ArrayExpr) Pos() Pos { return a.Lparen } +func (a *ArrayExpr) End() Pos { return posAdd(a.Rparen, 1) } + +// ExtGlob represents a Bash extended globbing expression. Note that +// these are parsed independently of whether shopt has been called or +// not. 
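+// For example, !(*.go) becomes an ExtGlob whose Pattern literal is "*.go".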
+// +// This node will never appear when in PosixConformant mode. +type ExtGlob struct { + Op GlobOperator + Pattern Lit +} + +func (e *ExtGlob) Pos() Pos { return posAdd(e.Pattern.Pos(), -2) } +func (e *ExtGlob) End() Pos { return posAdd(e.Pattern.End(), 1) } + +// ProcSubst represents a Bash process substitution. +// +// This node will never appear when in PosixConformant mode. +type ProcSubst struct { + OpPos, Rparen Pos + Op ProcOperator + Stmts []*Stmt +} + +func (s *ProcSubst) Pos() Pos { return s.OpPos } +func (s *ProcSubst) End() Pos { return posAdd(s.Rparen, 1) } + +// EvalClause represents a Bash eval clause. +// +// This node will never appear when in PosixConformant mode. +type EvalClause struct { + Eval Pos + Stmt *Stmt +} + +func (e *EvalClause) Pos() Pos { return e.Eval } +func (e *EvalClause) End() Pos { + if e.Stmt == nil { + return posAdd(e.Eval, 4) + } + return e.Stmt.End() +} + +// CoprocClause represents a Bash coproc clause. +// +// This node will never appear when in PosixConformant mode. +type CoprocClause struct { + Coproc Pos + Name *Lit + Stmt *Stmt +} + +func (c *CoprocClause) Pos() Pos { return c.Coproc } +func (c *CoprocClause) End() Pos { return c.Stmt.End() } + +// LetClause represents a Bash let clause. +// +// This node will never appear when in PosixConformant mode. +type LetClause struct { + Let Pos + Exprs []ArithmExpr +} + +func (l *LetClause) Pos() Pos { return l.Let } +func (l *LetClause) End() Pos { return l.Exprs[len(l.Exprs)-1].End() } + +func posAdd(pos Pos, n int) Pos { + if pos == defaultPos { + return pos + } + return pos + Pos(n) +} + +func posAddStr(pos Pos, s string) Pos { + return posAdd(pos, len(s)) +} + +func stmtFirstPos(sts []*Stmt) Pos { + if len(sts) == 0 { + return defaultPos + } + return sts[0].Pos() +} + +func stmtLastEnd(sts []*Stmt) Pos { + if len(sts) == 0 { + return defaultPos + } + return sts[len(sts)-1].End() +} + +func partsFirstPos(ps []WordPart) Pos { + if len(ps) == 0 { + return defaultPos + } + return ps[0].Pos() +} + +func partsLastEnd(ps []WordPart) Pos { + if len(ps) == 0 { + return defaultPos + } + return ps[len(ps)-1].End() +} + +func wordLastEnd(ws []Word) Pos { + if len(ws) == 0 { + return defaultPos + } + return ws[len(ws)-1].End() +} diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/parser.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/parser.go new file mode 100644 index 0000000000..c9ef38eab2 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/parser.go @@ -0,0 +1,1659 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package syntax + +import ( + "bytes" + "fmt" + "strconv" + "sync" +) + +// ParseMode controls the parser behaviour via a set of flags. +type ParseMode uint + +const ( + ParseComments ParseMode = 1 << iota // add comments to the AST + PosixConformant // match the POSIX standard where it differs from bash +) + +var parserFree = sync.Pool{ + New: func() interface{} { + return &parser{helperBuf: new(bytes.Buffer)} + }, +} + +// Parse reads and parses a shell program with an optional name. It +// returns the parsed program if no issues were encountered. Otherwise, +// an error is returned. 
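+//
+// A minimal sketch of calling it (the name "stdin.sh" and the source below
+// are placeholders):
+//
+//	f, err := Parse([]byte("echo ${"), "stdin.sh", 0)
+//	if err != nil {
+//		// err is normally a *ParseError carrying the file name plus the
+//		// line and column of the failure, e.g.
+//		// "stdin.sh:1:6: parameter expansion requires a literal".
+//		fmt.Println(err)
+//	}
+//	_ = f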
+func Parse(src []byte, name string, mode ParseMode) (*File, error) { + p := parserFree.Get().(*parser) + p.reset() + alloc := &struct { + f File + l [16]int + }{} + p.f = &alloc.f + p.f.Name = name + p.f.Lines = alloc.l[:1] + p.src, p.mode = src, mode + p.next() + p.f.Stmts = p.stmts() + parserFree.Put(p) + return p.f, p.err +} + +type parser struct { + src []byte + + f *File + mode ParseMode + + spaced, newLine bool + + err error + + tok Token + val string + + pos Pos + npos int + + quote quoteState + asPos int + + // list of pending heredoc bodies + buriedHdocs int + heredocs []*Redirect + hdocStop []byte + + helperBuf *bytes.Buffer + + litBatch []Lit + wpsBatch []WordPart + stmtBatch []Stmt + stListBatch []*Stmt + + litBuf [32]byte +} + +func (p *parser) lit(pos Pos, val string) *Lit { + if len(p.litBatch) == 0 { + p.litBatch = make([]Lit, 32) + } + l := &p.litBatch[0] + l.ValuePos = pos + l.Value = val + p.litBatch = p.litBatch[1:] + return l +} + +func (p *parser) singleWps(wp WordPart) []WordPart { + if len(p.wpsBatch) == 0 { + p.wpsBatch = make([]WordPart, 64) + } + wps := p.wpsBatch[:1:1] + p.wpsBatch = p.wpsBatch[1:] + wps[0] = wp + return wps +} + +func (p *parser) wps() []WordPart { + if len(p.wpsBatch) < 4 { + p.wpsBatch = make([]WordPart, 64) + } + wps := p.wpsBatch[:0:4] + p.wpsBatch = p.wpsBatch[4:] + return wps +} + +func (p *parser) stmt(pos Pos) *Stmt { + if len(p.stmtBatch) == 0 { + p.stmtBatch = make([]Stmt, 16) + } + s := &p.stmtBatch[0] + s.Position = pos + p.stmtBatch = p.stmtBatch[1:] + return s +} + +func (p *parser) stList() []*Stmt { + if len(p.stListBatch) == 0 { + p.stListBatch = make([]*Stmt, 128) + } + stmts := p.stListBatch[:0:4] + p.stListBatch = p.stListBatch[4:] + return stmts +} + +type quoteState int + +const ( + noState quoteState = 1 << iota + subCmd + subCmdBckquo + sglQuotes + dblQuotes + hdocWord + hdocBody + hdocBodyTabs + arithmExpr + arithmExprLet + arithmExprCmd + arithmExprBrack + testRegexp + switchCase + paramExpName + paramExpInd + paramExpRepl + paramExpExp + + allRegTokens = noState | subCmd | subCmdBckquo | hdocWord | switchCase + allArithmExpr = arithmExpr | arithmExprLet | arithmExprCmd | arithmExprBrack + allRbrack = arithmExprBrack | paramExpInd +) + +func (p *parser) bash() bool { return p.mode&PosixConformant == 0 } + +func (p *parser) reset() { + p.spaced, p.newLine = false, false + p.err = nil + p.npos = 0 + p.tok, p.quote = illegalTok, noState + p.heredocs = p.heredocs[:0] + p.buriedHdocs = 0 +} + +type saveState struct { + quote quoteState + buriedHdocs int +} + +func (p *parser) preNested(quote quoteState) (s saveState) { + s.quote = p.quote + s.buriedHdocs = p.buriedHdocs + p.buriedHdocs = len(p.heredocs) + p.quote = quote + return +} + +func (p *parser) postNested(s saveState) { + p.quote, p.buriedHdocs = s.quote, s.buriedHdocs +} + +func (p *parser) unquotedWordBytes(w Word) ([]byte, bool) { + p.helperBuf.Reset() + didUnquote := false + for _, wp := range w.Parts { + if p.unquotedWordPart(p.helperBuf, wp) { + didUnquote = true + } + } + return p.helperBuf.Bytes(), didUnquote +} + +func (p *parser) unquotedWordPart(b *bytes.Buffer, wp WordPart) bool { + switch x := wp.(type) { + case *Lit: + if x.Value[0] == '\\' { + b.WriteString(x.Value[1:]) + return true + } + b.WriteString(x.Value) + return false + case *SglQuoted: + b.WriteString(x.Value) + return true + case *DblQuoted: + for _, wp2 := range x.Parts { + p.unquotedWordPart(b, wp2) + } + return true + default: + // catch-all for unusual cases such as ParamExp + 
b.Write(p.src[wp.Pos()-1 : wp.End()-1]) + return false + } +} + +func (p *parser) doHeredocs() { + p.tok = illegalTok + old := p.quote + hdocs := p.heredocs[p.buriedHdocs:] + p.heredocs = p.heredocs[:p.buriedHdocs] + for i, r := range hdocs { + if r.Op == DashHdoc { + p.quote = hdocBodyTabs + } else { + p.quote = hdocBody + } + var quoted bool + p.hdocStop, quoted = p.unquotedWordBytes(r.Word) + if i > 0 && p.npos < len(p.src) && p.src[p.npos] == '\n' { + p.npos++ + p.f.Lines = append(p.f.Lines, p.npos) + } + if !quoted { + p.next() + r.Hdoc = p.word() + continue + } + r.Hdoc = p.hdocLitWord() + + } + p.quote = old +} + +func (p *parser) got(tok Token) bool { + if p.tok == tok { + p.next() + return true + } + return false +} + +func (p *parser) gotRsrv(val string) bool { + if p.tok == _LitWord && p.val == val { + p.next() + return true + } + return false +} + +func (p *parser) gotSameLine(tok Token) bool { + if !p.newLine && p.tok == tok { + p.next() + return true + } + return false +} + +func readableStr(s string) string { + // don't quote tokens like & or } + if s != "" && s[0] >= 'a' && s[0] <= 'z' { + return strconv.Quote(s) + } + return s +} + +func (p *parser) followErr(pos Pos, left, right string) { + leftStr := readableStr(left) + p.posErr(pos, "%s must be followed by %s", leftStr, right) +} + +func (p *parser) follow(lpos Pos, left string, tok Token) Pos { + pos := p.pos + if !p.got(tok) { + p.followErr(lpos, left, tok.String()) + } + return pos +} + +func (p *parser) followRsrv(lpos Pos, left, val string) Pos { + pos := p.pos + if !p.gotRsrv(val) { + p.followErr(lpos, left, fmt.Sprintf("%q", val)) + } + return pos +} + +func (p *parser) followStmts(left string, lpos Pos, stops ...string) []*Stmt { + if p.gotSameLine(semicolon) { + return nil + } + sts := p.stmts(stops...) + if len(sts) < 1 && !p.newLine { + p.followErr(lpos, left, "a statement list") + } + return sts +} + +func (p *parser) followWordTok(tok Token, pos Pos) Word { + w := p.word() + if w.Parts == nil { + p.followErr(pos, tok.String(), "a word") + } + return w +} + +func (p *parser) followWord(s string, pos Pos) Word { + w := p.word() + if w.Parts == nil { + p.followErr(pos, s, "a word") + } + return w +} + +func (p *parser) stmtEnd(n Node, start, end string) Pos { + pos := p.pos + if !p.gotRsrv(end) { + p.posErr(n.Pos(), "%s statement must end with %q", start, end) + } + return pos +} + +func (p *parser) quoteErr(lpos Pos, quote Token) { + p.posErr(lpos, "reached %s without closing quote %s", p.tok, quote) +} + +func (p *parser) matchingErr(lpos Pos, left, right interface{}) { + p.posErr(lpos, "reached %s without matching %s with %s", p.tok, left, right) +} + +func (p *parser) matched(lpos Pos, left, right Token) Pos { + pos := p.pos + if !p.got(right) { + p.matchingErr(lpos, left, right) + } + return pos +} + +func (p *parser) errPass(err error) { + if p.err == nil { + p.err = err + p.tok = _EOF + } +} + +// ParseError represents an error found when parsing a source file. +type ParseError struct { + Position + Filename, Text string +} + +func (e *ParseError) Error() string { + prefix := "" + if e.Filename != "" { + prefix = e.Filename + ":" + } + return fmt.Sprintf("%s%d:%d: %s", prefix, e.Line, e.Column, e.Text) +} + +func (p *parser) posErr(pos Pos, format string, a ...interface{}) { + p.errPass(&ParseError{ + Position: p.f.Position(pos), + Filename: p.f.Name, + Text: fmt.Sprintf(format, a...), + }) +} + +func (p *parser) curErr(format string, a ...interface{}) { + p.posErr(p.pos, format, a...) 
+} + +func (p *parser) stmts(stops ...string) (sts []*Stmt) { + q := p.quote + gotEnd := true + for p.tok != _EOF { + switch p.tok { + case _LitWord: + for _, stop := range stops { + if p.val == stop { + return + } + } + case rightParen: + if q == subCmd { + return + } + case bckQuote: + if q == subCmdBckquo { + return + } + case dblSemicolon, semiFall, dblSemiFall: + if q == switchCase { + return + } + p.curErr("%s can only be used in a case clause", p.tok) + } + if !p.newLine && !gotEnd { + p.curErr("statements must be separated by &, ; or a newline") + } + if p.tok == _EOF { + break + } + if s, end := p.getStmt(true); s == nil { + p.invalidStmtStart() + } else { + if sts == nil { + sts = p.stList() + } + sts = append(sts, s) + gotEnd = end + } + } + return +} + +func (p *parser) invalidStmtStart() { + switch p.tok { + case semicolon, And, Or, AndExpr, OrExpr: + p.curErr("%s can only immediately follow a statement", p.tok) + case rightParen: + p.curErr("%s can only be used to close a subshell", p.tok) + default: + p.curErr("%s is not a valid start for a statement", p.tok) + } +} + +func (p *parser) word() Word { + if p.tok == _LitWord { + w := Word{Parts: p.singleWps(p.lit(p.pos, p.val))} + p.next() + return w + } + return Word{Parts: p.wordParts()} +} + +func (p *parser) gotLit(l *Lit) bool { + l.ValuePos = p.pos + if p.tok == _Lit || p.tok == _LitWord { + l.Value = p.val + p.next() + return true + } + return false +} + +func (p *parser) wordParts() (wps []WordPart) { + for { + n := p.wordPart() + if n == nil { + return + } + if wps == nil { + wps = p.wps() + } + wps = append(wps, n) + if p.spaced { + return + } + } +} + +func (p *parser) wordPart() WordPart { + switch p.tok { + case _Lit, _LitWord: + l := p.lit(p.pos, p.val) + p.next() + return l + case dollBrace: + return p.paramExp() + case dollDblParen, dollBrack: + left := p.tok + ar := &ArithmExp{Left: p.pos, Bracket: left == dollBrack} + old := p.preNested(arithmExpr) + if ar.Bracket { + p.quote = arithmExprBrack + } else if !p.couldBeArithm() { + p.postNested(old) + p.npos = int(ar.Left) + 1 + p.tok = dollParen + p.pos = ar.Left + wp := p.wordPart() + if p.err != nil { + p.err = nil + p.matchingErr(ar.Left, dollDblParen, dblRightParen) + } + return wp + } + p.next() + ar.X = p.arithmExpr(left, ar.Left, 0, false) + if ar.Bracket { + if p.tok != rightBrack { + p.matchingErr(ar.Left, dollBrack, rightBrack) + } + p.postNested(old) + ar.Right = p.pos + p.next() + } else { + ar.Right = p.arithmEnd(dollDblParen, ar.Left, old) + } + return ar + case dollParen: + if p.quote == hdocWord { + p.curErr("nested statements not allowed in heredoc words") + } + cs := &CmdSubst{Left: p.pos} + old := p.preNested(subCmd) + p.next() + cs.Stmts = p.stmts() + p.postNested(old) + cs.Right = p.matched(cs.Left, leftParen, rightParen) + return cs + case dollar: + var b byte + if p.npos >= len(p.src) { + p.tok = _EOF + } else { + b = p.src[p.npos] + } + if p.tok == _EOF || wordBreak(b) || b == '"' || b == '\'' || b == '`' || b == '[' { + l := p.lit(p.pos, "$") + p.next() + return l + } + pe := &ParamExp{Dollar: p.pos, Short: true} + p.pos++ + switch b { + case '@', '*', '#', '$', '?', '!', '0', '-': + p.npos++ + p.tok, p.val = _Lit, string(b) + default: + p.advanceLitOther(p.quote) + } + p.gotLit(&pe.Param) + return pe + case cmdIn, cmdOut: + ps := &ProcSubst{Op: ProcOperator(p.tok), OpPos: p.pos} + old := p.preNested(subCmd) + p.next() + ps.Stmts = p.stmts() + p.postNested(old) + ps.Rparen = p.matched(ps.OpPos, Token(ps.Op), rightParen) + return ps + case 
sglQuote: + sq := &SglQuoted{Position: p.pos} + bs, found := p.readUntil('\'') + rem := bs + for { + i := bytes.IndexByte(rem, '\n') + if i < 0 { + p.npos += len(rem) + break + } + p.npos += i + 1 + p.f.Lines = append(p.f.Lines, p.npos) + rem = rem[i+1:] + } + p.npos++ + if !found { + p.posErr(sq.Pos(), "reached EOF without closing quote %s", sglQuote) + } + sq.Value = string(bs) + p.next() + return sq + case dollSglQuote: + sq := &SglQuoted{Position: p.pos, Dollar: true} + old := p.quote + p.quote = sglQuotes + p.next() + if p.tok == sglQuote { + p.quote = old + } else { + sq.Value = p.val + p.quote = old + p.next() + } + if !p.got(sglQuote) { + p.quoteErr(sq.Pos(), sglQuote) + } + return sq + case dblQuote: + if p.quote == dblQuotes { + return nil + } + fallthrough + case dollDblQuote: + q := &DblQuoted{Position: p.pos, Dollar: p.tok == dollDblQuote} + old := p.quote + p.quote = dblQuotes + p.next() + if p.tok == _LitWord { + q.Parts = p.singleWps(p.lit(p.pos, p.val)) + p.next() + } else { + q.Parts = p.wordParts() + } + p.quote = old + if !p.got(dblQuote) { + p.quoteErr(q.Pos(), dblQuote) + } + return q + case bckQuote: + switch p.quote { + case hdocWord: + p.curErr("nested statements not allowed in heredoc words") + case subCmdBckquo: + return nil + } + cs := &CmdSubst{Left: p.pos} + old := p.preNested(subCmdBckquo) + p.next() + cs.Stmts = p.stmts() + p.postNested(old) + cs.Right = p.pos + if !p.got(bckQuote) { + p.quoteErr(cs.Pos(), bckQuote) + } + return cs + case globQuest, globMul, globAdd, globAt, globNot: + eg := &ExtGlob{Op: GlobOperator(p.tok)} + eg.Pattern.ValuePos = Pos(p.npos + 1) + start := p.npos + lparens := 0 + for _, b := range p.src[start:] { + p.npos++ + if b == '(' { + lparens++ + } else if b == ')' { + if lparens--; lparens < 0 { + eg.Pattern.Value = string(p.src[start : p.npos-1]) + break + } + } + } + p.next() + if lparens != -1 { + p.matchingErr(p.pos, eg.Op, rightParen) + } + return eg + } + return nil +} + +func (p *parser) couldBeArithm() (could bool) { + // save state + oldTok := p.tok + oldNpos := p.npos + oldLines := len(p.f.Lines) + p.next() + lparens := 0 +tokLoop: + for p.tok != _EOF { + switch p.tok { + case leftParen, dollParen: + lparens++ + case dollDblParen, dblLeftParen: + lparens += 2 + case rightParen: + if lparens == 0 { + could = p.peekArithmEnd() + break tokLoop + } + lparens-- + } + p.next() + } + // recover state + p.tok = oldTok + p.npos = oldNpos + p.f.Lines = p.f.Lines[:oldLines] + return +} + +func arithmOpLevel(tok Token) int { + switch tok { + case Comma: + return 0 + case AddAssgn, SubAssgn, MulAssgn, QuoAssgn, RemAssgn, AndAssgn, + OrAssgn, XorAssgn, ShlAssgn, ShrAssgn: + return 1 + case Assgn: + return 2 + case Quest, Colon: + return 3 + case AndExpr, OrExpr: + return 4 + case And, Or, Xor: + return 5 + case Eql, Neq: + return 6 + case Lss, Gtr, Leq, Geq: + return 7 + case Shl, Shr: + return 8 + case Add, Sub: + return 9 + case Mul, Quo, Rem: + return 10 + case Pow: + return 11 + } + return -1 +} + +func (p *parser) arithmExpr(ftok Token, fpos Pos, level int, compact bool) ArithmExpr { + if p.tok == _EOF || p.peekArithmEnd() { + return nil + } + var left ArithmExpr + if level > 11 { + left = p.arithmExprBase(ftok, fpos, compact) + } else { + left = p.arithmExpr(ftok, fpos, level+1, compact) + } + if compact && p.spaced { + return left + } + newLevel := arithmOpLevel(p.tok) + if newLevel < 0 { + switch p.tok { + case _Lit, _LitWord: + p.curErr("not a valid arithmetic operator: %s", p.val) + return nil + case rightParen, _EOF: + 
default: + if p.quote == arithmExpr { + p.curErr("not a valid arithmetic operator: %v", p.tok) + return nil + } + } + } + if newLevel < 0 || newLevel < level { + return left + } + b := &BinaryArithm{ + OpPos: p.pos, + Op: p.tok, + X: left, + } + if p.next(); compact && p.spaced { + p.followErr(b.OpPos, b.Op.String(), "an expression") + } + if b.Y = p.arithmExpr(b.Op, b.OpPos, newLevel, compact); b.Y == nil { + p.followErr(b.OpPos, b.Op.String(), "an expression") + } + return b +} + +func (p *parser) arithmExprBase(ftok Token, fpos Pos, compact bool) ArithmExpr { + var x ArithmExpr + switch p.tok { + case Inc, Dec, Not: + pre := &UnaryArithm{OpPos: p.pos, Op: p.tok} + p.next() + pre.X = p.arithmExprBase(pre.Op, pre.OpPos, compact) + return pre + case leftParen: + pe := &ParenArithm{Lparen: p.pos} + p.next() + if pe.X = p.arithmExpr(leftParen, pe.Lparen, 0, false); pe.X == nil { + p.posErr(pe.Lparen, "parentheses must enclose an expression") + } + pe.Rparen = p.matched(pe.Lparen, leftParen, rightParen) + x = pe + case Add, Sub: + ue := &UnaryArithm{OpPos: p.pos, Op: p.tok} + if p.next(); compact && p.spaced { + p.followErr(ue.OpPos, ue.Op.String(), "an expression") + } + if ue.X = p.arithmExpr(ue.Op, ue.OpPos, 0, compact); ue.X == nil { + p.followErr(ue.OpPos, ue.Op.String(), "an expression") + } + x = ue + case bckQuote: + if p.quote == arithmExprLet { + return nil + } + fallthrough + default: + w := p.word() + if w.Parts == nil { + p.followErr(fpos, ftok.String(), "an expression") + } + x = &w + } + if compact && p.spaced { + return x + } + if p.tok == Inc || p.tok == Dec { + u := &UnaryArithm{ + Post: true, + OpPos: p.pos, + Op: p.tok, + X: x, + } + p.next() + return u + } + return x +} + +func (p *parser) gotParamLit(l *Lit) bool { + l.ValuePos = p.pos + switch p.tok { + case _Lit, _LitWord: + l.Value = p.val + case dollar: + l.Value = "$" + case Quest: + l.Value = "?" 
+ case Hash: + l.Value = "#" + case Sub: + l.Value = "-" + default: + return false + } + p.next() + return true +} + +func (p *parser) paramExp() *ParamExp { + pe := &ParamExp{Dollar: p.pos} + old := p.preNested(paramExpName) + p.next() + switch p.tok { + case dblHash: + p.tok = Hash + p.npos-- + fallthrough + case Hash: + if p.npos < len(p.src) && p.src[p.npos] != '}' { + pe.Length = true + p.next() + } + } + if !p.gotParamLit(&pe.Param) && !pe.Length { + p.posErr(pe.Dollar, "parameter expansion requires a literal") + } + if p.tok == rightBrace { + pe.Rbrace = p.pos + p.postNested(old) + p.next() + return pe + } + if p.tok == leftBrack { + if !p.bash() { + p.curErr("arrays are a bash feature") + } + lpos := p.pos + p.quote = paramExpInd + p.next() + pe.Ind = &Index{Word: p.word()} + p.quote = paramExpName + p.matched(lpos, leftBrack, rightBrack) + } + switch p.tok { + case rightBrace: + pe.Rbrace = p.pos + p.postNested(old) + p.next() + return pe + case Quo, dblQuo: + pe.Repl = &Replace{All: p.tok == dblQuo} + p.quote = paramExpRepl + p.next() + pe.Repl.Orig = p.word() + if p.tok == Quo { + p.quote = paramExpExp + p.next() + pe.Repl.With = p.word() + } + case Colon: + if !p.bash() { + p.curErr("slicing is a bash feature") + } + pe.Slice = &Slice{} + colonPos := p.pos + p.next() + if p.tok != Colon { + pe.Slice.Offset = p.followWordTok(Colon, colonPos) + } + colonPos = p.pos + if p.got(Colon) { + pe.Slice.Length = p.followWordTok(Colon, colonPos) + } + case Xor, dblXor, Comma, dblComma: + if !p.bash() { + p.curErr("case expansions are a bash feature") + } + fallthrough + default: + pe.Exp = &Expansion{Op: ParExpOperator(p.tok)} + p.quote = paramExpExp + p.next() + pe.Exp.Word = p.word() + } + p.postNested(old) + pe.Rbrace = p.pos + p.matched(pe.Dollar, dollBrace, rightBrace) + return pe +} + +func (p *parser) peekArithmEnd() bool { + return p.tok == rightParen && p.npos < len(p.src) && p.src[p.npos] == ')' +} + +func (p *parser) arithmEnd(ltok Token, lpos Pos, old saveState) Pos { + if p.peekArithmEnd() { + p.npos++ + } else { + p.matchingErr(lpos, ltok, dblRightParen) + } + p.postNested(old) + pos := p.pos + p.next() + return pos +} + +func stopToken(tok Token) bool { + switch tok { + case _EOF, semicolon, And, Or, AndExpr, OrExpr, pipeAll, dblSemicolon, + semiFall, dblSemiFall, rightParen: + return true + } + return false +} + +func (p *parser) validIdent() bool { + if p.asPos <= 0 { + return false + } + s := p.val[:p.asPos] + for i, c := range s { + switch { + case 'a' <= c && c <= 'z': + case 'A' <= c && c <= 'Z': + case c == '_': + case i > 0 && '0' <= c && c <= '9': + case i > 0 && (c == '[' || c == ']') && p.bash(): + default: + return false + } + } + return true +} + +func (p *parser) getAssign() *Assign { + asPos := p.asPos + as := &Assign{Name: p.lit(p.pos, p.val[:asPos])} + if p.val[asPos] == '+' { + as.Append = true + asPos++ + } + start := p.lit(p.pos+1, p.val[asPos+1:]) + if start.Value != "" { + start.ValuePos += Pos(asPos) + as.Value.Parts = p.singleWps(start) + } + p.next() + if p.spaced { + return as + } + if start.Value == "" && p.tok == leftParen { + if !p.bash() { + p.curErr("arrays are a bash feature") + } + ae := &ArrayExpr{Lparen: p.pos} + p.next() + for p.tok != _EOF && p.tok != rightParen { + if w := p.word(); w.Parts == nil { + p.curErr("array elements must be words") + } else { + ae.List = append(ae.List, w) + } + } + ae.Rparen = p.matched(ae.Lparen, leftParen, rightParen) + as.Value.Parts = p.singleWps(ae) + } else if !p.newLine && !stopToken(p.tok) { + if w := 
p.word(); start.Value == "" { + as.Value = w + } else { + as.Value.Parts = append(as.Value.Parts, w.Parts...) + } + } + return as +} + +func litRedir(src []byte, npos int) bool { + return npos < len(src) && (src[npos] == '>' || src[npos] == '<') +} + +func (p *parser) peekRedir() bool { + switch p.tok { + case _LitWord: + return litRedir(p.src, p.npos) + case Gtr, Shr, Lss, dplIn, dplOut, clbOut, rdrInOut, Shl, dashHdoc, + wordHdoc, rdrAll, appAll: + return true + } + return false +} + +func (p *parser) doRedirect(s *Stmt) { + r := &Redirect{} + var l Lit + if p.gotLit(&l) { + r.N = &l + } + r.Op, r.OpPos = RedirOperator(p.tok), p.pos + p.next() + switch r.Op { + case Hdoc, DashHdoc: + old := p.quote + p.quote = hdocWord + if p.newLine { + p.curErr("heredoc stop word must be on the same line") + } + p.heredocs = append(p.heredocs, r) + r.Word = p.followWordTok(Token(r.Op), r.OpPos) + p.quote = old + p.next() + default: + if p.newLine { + p.curErr("redirect word must be on the same line") + } + r.Word = p.followWordTok(Token(r.Op), r.OpPos) + } + s.Redirs = append(s.Redirs, r) +} + +func (p *parser) getStmt(readEnd bool) (s *Stmt, gotEnd bool) { + s = p.stmt(p.pos) + if p.gotRsrv("!") { + s.Negated = true + } +preLoop: + for { + switch p.tok { + case _Lit, _LitWord: + if p.validIdent() { + s.Assigns = append(s.Assigns, p.getAssign()) + } else if litRedir(p.src, p.npos) { + p.doRedirect(s) + } else { + break preLoop + } + case Gtr, Shr, Lss, dplIn, dplOut, clbOut, rdrInOut, Shl, dashHdoc, + wordHdoc, rdrAll, appAll: + p.doRedirect(s) + default: + break preLoop + } + switch { + case p.newLine, p.tok == _EOF: + return + case p.tok == semicolon: + if readEnd { + s.SemiPos = p.pos + p.next() + gotEnd = true + } + return + } + } + if s = p.gotStmtPipe(s); s == nil { + return + } + switch p.tok { + case AndExpr, OrExpr: + b := &BinaryCmd{OpPos: p.pos, Op: BinCmdOperator(p.tok), X: s} + p.next() + if b.Y, _ = p.getStmt(false); b.Y == nil { + p.followErr(b.OpPos, b.Op.String(), "a statement") + } + s = p.stmt(s.Position) + s.Cmd = b + if readEnd && p.gotSameLine(semicolon) { + gotEnd = true + } + case And: + p.next() + s.Background = true + gotEnd = true + case semicolon: + if !p.newLine && readEnd { + s.SemiPos = p.pos + p.next() + gotEnd = true + } + } + return +} + +func bashDeclareWord(s string) bool { + switch s { + case "declare", "local", "export", "readonly", "typeset", "nameref": + return true + } + return false +} + +func (p *parser) gotStmtPipe(s *Stmt) *Stmt { + switch p.tok { + case leftParen: + s.Cmd = p.subshell() + case dblLeftParen: + s.Cmd = p.arithmExpCmd() + case _LitWord: + switch { + case p.val == "}": + p.curErr("%s can only be used to close a block", p.val) + case p.val == "{": + s.Cmd = p.block() + case p.val == "if": + s.Cmd = p.ifClause() + case p.val == "while": + s.Cmd = p.whileClause() + case p.val == "until": + s.Cmd = p.untilClause() + case p.val == "for": + s.Cmd = p.forClause() + case p.val == "case": + s.Cmd = p.caseClause() + case p.bash() && p.val == "[[": + s.Cmd = p.testClause() + case p.bash() && bashDeclareWord(p.val): + s.Cmd = p.declClause() + case p.bash() && p.val == "eval": + s.Cmd = p.evalClause() + case p.bash() && p.val == "coproc": + s.Cmd = p.coprocClause() + case p.bash() && p.val == "let": + s.Cmd = p.letClause() + case p.bash() && p.val == "function": + s.Cmd = p.bashFuncDecl() + default: + name := Lit{ValuePos: p.pos, Value: p.val} + p.next() + if p.gotSameLine(leftParen) { + p.follow(name.ValuePos, "foo(", rightParen) + s.Cmd = p.funcDecl(name, 
name.ValuePos) + } else { + s.Cmd = p.callExpr(s, Word{ + Parts: p.singleWps(&name), + }) + } + } + case _Lit, dollBrace, dollDblParen, dollParen, dollar, cmdIn, cmdOut, sglQuote, + dollSglQuote, dblQuote, dollDblQuote, bckQuote, dollBrack, globQuest, globMul, globAdd, + globAt, globNot: + w := Word{Parts: p.wordParts()} + if p.gotSameLine(leftParen) && p.err == nil { + rawName := string(p.src[w.Pos()-1 : w.End()-1]) + p.posErr(w.Pos(), "invalid func name: %q", rawName) + } + s.Cmd = p.callExpr(s, w) + } + for !p.newLine && p.peekRedir() { + p.doRedirect(s) + } + if s.Cmd == nil && len(s.Redirs) == 0 && !s.Negated && len(s.Assigns) == 0 { + return nil + } + if p.tok == Or || p.tok == pipeAll { + b := &BinaryCmd{OpPos: p.pos, Op: BinCmdOperator(p.tok), X: s} + p.next() + if b.Y = p.gotStmtPipe(p.stmt(p.pos)); b.Y == nil { + p.followErr(b.OpPos, b.Op.String(), "a statement") + } + s = p.stmt(s.Position) + s.Cmd = b + } + return s +} + +func (p *parser) subshell() *Subshell { + s := &Subshell{Lparen: p.pos} + old := p.preNested(subCmd) + p.next() + s.Stmts = p.stmts() + p.postNested(old) + s.Rparen = p.matched(s.Lparen, leftParen, rightParen) + return s +} + +func (p *parser) arithmExpCmd() Command { + ar := &ArithmCmd{Left: p.pos} + old := p.preNested(arithmExprCmd) + if !p.couldBeArithm() { + p.postNested(old) + p.npos = int(ar.Left) + p.tok = leftParen + p.pos = ar.Left + s := p.subshell() + if p.err != nil { + p.err = nil + p.matchingErr(ar.Left, dblLeftParen, dblRightParen) + } + return s + } + p.next() + ar.X = p.arithmExpr(dblLeftParen, ar.Left, 0, false) + ar.Right = p.arithmEnd(dblLeftParen, ar.Left, old) + return ar +} + +func (p *parser) block() *Block { + b := &Block{Lbrace: p.pos} + p.next() + b.Stmts = p.stmts("}") + b.Rbrace = p.pos + if !p.gotRsrv("}") { + p.matchingErr(b.Lbrace, "{", "}") + } + return b +} + +func (p *parser) ifClause() *IfClause { + ic := &IfClause{If: p.pos} + p.next() + ic.CondStmts = p.followStmts("if", ic.If, "then") + ic.Then = p.followRsrv(ic.If, "if ", "then") + ic.ThenStmts = p.followStmts("then", ic.Then, "fi", "elif", "else") + elifPos := p.pos + for p.gotRsrv("elif") { + elf := &Elif{Elif: elifPos} + elf.CondStmts = p.followStmts("elif", elf.Elif, "then") + elf.Then = p.followRsrv(elf.Elif, "elif ", "then") + elf.ThenStmts = p.followStmts("then", elf.Then, "fi", "elif", "else") + ic.Elifs = append(ic.Elifs, elf) + elifPos = p.pos + } + if elsePos := p.pos; p.gotRsrv("else") { + ic.Else = elsePos + ic.ElseStmts = p.followStmts("else", ic.Else, "fi") + } + ic.Fi = p.stmtEnd(ic, "if", "fi") + return ic +} + +func (p *parser) whileClause() *WhileClause { + wc := &WhileClause{While: p.pos} + p.next() + wc.CondStmts = p.followStmts("while", wc.While, "do") + wc.Do = p.followRsrv(wc.While, "while ", "do") + wc.DoStmts = p.followStmts("do", wc.Do, "done") + wc.Done = p.stmtEnd(wc, "while", "done") + return wc +} + +func (p *parser) untilClause() *UntilClause { + uc := &UntilClause{Until: p.pos} + p.next() + uc.CondStmts = p.followStmts("until", uc.Until, "do") + uc.Do = p.followRsrv(uc.Until, "until ", "do") + uc.DoStmts = p.followStmts("do", uc.Do, "done") + uc.Done = p.stmtEnd(uc, "until", "done") + return uc +} + +func (p *parser) forClause() *ForClause { + fc := &ForClause{For: p.pos} + p.next() + fc.Loop = p.loop(fc.For) + fc.Do = p.followRsrv(fc.For, "for foo [in words]", "do") + fc.DoStmts = p.followStmts("do", fc.Do, "done") + fc.Done = p.stmtEnd(fc, "for", "done") + return fc +} + +func (p *parser) loop(forPos Pos) Loop { + if p.tok == 
dblLeftParen { + cl := &CStyleLoop{Lparen: p.pos} + old := p.preNested(arithmExprCmd) + p.next() + if p.tok == dblSemicolon { + p.npos-- + p.tok = semicolon + } + if p.tok != semicolon { + cl.Init = p.arithmExpr(dblLeftParen, cl.Lparen, 0, false) + } + scPos := p.pos + p.follow(p.pos, "expression", semicolon) + if p.tok != semicolon { + cl.Cond = p.arithmExpr(semicolon, scPos, 0, false) + } + scPos = p.pos + p.follow(p.pos, "expression", semicolon) + if p.tok != semicolon { + cl.Post = p.arithmExpr(semicolon, scPos, 0, false) + } + cl.Rparen = p.arithmEnd(dblLeftParen, cl.Lparen, old) + p.gotSameLine(semicolon) + return cl + } + wi := &WordIter{} + if !p.gotLit(&wi.Name) { + p.followErr(forPos, "for", "a literal") + } + if p.gotRsrv("in") { + for !p.newLine && p.tok != _EOF && p.tok != semicolon { + if w := p.word(); w.Parts == nil { + p.curErr("word list can only contain words") + } else { + wi.List = append(wi.List, w) + } + } + p.gotSameLine(semicolon) + } else if !p.newLine && !p.got(semicolon) { + p.followErr(forPos, "for foo", `"in", ; or a newline`) + } + return wi +} + +func (p *parser) caseClause() *CaseClause { + cc := &CaseClause{Case: p.pos} + p.next() + cc.Word = p.followWord("case", cc.Case) + p.followRsrv(cc.Case, "case x", "in") + cc.List = p.patLists() + cc.Esac = p.stmtEnd(cc, "case", "esac") + return cc +} + +func (p *parser) patLists() (pls []*PatternList) { + for p.tok != _EOF && !(p.tok == _LitWord && p.val == "esac") { + pl := &PatternList{} + p.got(leftParen) + for p.tok != _EOF { + if w := p.word(); w.Parts == nil { + p.curErr("case patterns must consist of words") + } else { + pl.Patterns = append(pl.Patterns, w) + } + if p.tok == rightParen { + break + } + if !p.got(Or) { + p.curErr("case patterns must be separated with |") + } + } + old := p.preNested(switchCase) + p.next() + pl.Stmts = p.stmts("esac") + p.postNested(old) + pl.OpPos = p.pos + if p.tok != dblSemicolon && p.tok != semiFall && p.tok != dblSemiFall { + pl.Op = DblSemicolon + pls = append(pls, pl) + break + } + pl.Op = CaseOperator(p.tok) + p.next() + pls = append(pls, pl) + } + return +} + +func (p *parser) testClause() *TestClause { + tc := &TestClause{Left: p.pos} + p.next() + if p.tok == _EOF || p.gotRsrv("]]") { + p.posErr(tc.Left, "test clause requires at least one expression") + } + tc.X = p.testExpr(illegalTok, tc.Left, 0) + tc.Right = p.pos + if !p.gotRsrv("]]") { + p.matchingErr(tc.Left, "[[", "]]") + } + return tc +} + +func (p *parser) testExpr(ftok Token, fpos Pos, level int) TestExpr { + var left TestExpr + if level > 1 { + left = p.testExprBase(ftok, fpos) + } else { + left = p.testExpr(ftok, fpos, level+1) + } + if left == nil { + return left + } + var newLevel int + switch p.tok { + case AndExpr, OrExpr: + case _LitWord: + if p.val == "]]" { + return left + } + fallthrough + case Lss, Gtr: + newLevel = 1 + case _EOF, rightParen: + return left + default: + p.curErr("not a valid test operator: %v", p.tok) + } + if newLevel < level { + return left + } + if p.tok == _LitWord { + if p.tok = testBinaryOp(p.val); p.tok == illegalTok { + p.curErr("not a valid test operator: %s", p.val) + } + } + b := &BinaryTest{ + OpPos: p.pos, + Op: BinTestOperator(p.tok), + X: left, + } + if b.Op == TsReMatch { + old := p.preNested(testRegexp) + p.next() + p.postNested(old) + } else { + p.next() + } + if b.Y = p.testExpr(Token(b.Op), b.OpPos, newLevel); b.Y == nil { + p.followErr(b.OpPos, b.Op.String(), "an expression") + } + return b +} + +func (p *parser) testExprBase(ftok Token, fpos Pos) TestExpr { + 
switch p.tok { + case _EOF: + return nil + case _LitWord: + if op := testUnaryOp(p.val); op != illegalTok { + p.tok = op + } + } + switch p.tok { + case Not: + u := &UnaryTest{OpPos: p.pos, Op: TsNot} + p.next() + u.X = p.testExpr(Token(u.Op), u.OpPos, 0) + return u + case tsExists, tsRegFile, tsDirect, tsCharSp, tsBlckSp, tsNmPipe, tsSocket, tsSmbLink, + tsGIDSet, tsUIDSet, tsRead, tsWrite, tsExec, tsNoEmpty, tsFdTerm, tsEmpStr, + tsNempStr, tsOptSet, tsVarSet, tsRefVar: + u := &UnaryTest{OpPos: p.pos, Op: UnTestOperator(p.tok)} + p.next() + w := p.followWordTok(ftok, fpos) + u.X = &w + return u + case leftParen: + pe := &ParenTest{Lparen: p.pos} + p.next() + if pe.X = p.testExpr(leftParen, pe.Lparen, 0); pe.X == nil { + p.posErr(pe.Lparen, "parentheses must enclose an expression") + } + pe.Rparen = p.matched(pe.Lparen, leftParen, rightParen) + return pe + case rightParen: + default: + w := p.followWordTok(ftok, fpos) + return &w + } + return nil +} + +func (p *parser) declClause() *DeclClause { + name := p.val + ds := &DeclClause{Position: p.pos} + switch name { + case "declare", "typeset": // typeset is an obsolete synonym + default: + ds.Variant = name + } + p.next() + for p.tok == _LitWord && p.val[0] == '-' { + ds.Opts = append(ds.Opts, p.word()) + } + for !p.newLine && !stopToken(p.tok) && !p.peekRedir() { + if (p.tok == _Lit || p.tok == _LitWord) && p.validIdent() { + ds.Assigns = append(ds.Assigns, p.getAssign()) + } else if w := p.word(); w.Parts == nil { + p.followErr(p.pos, name, "words") + } else { + ds.Assigns = append(ds.Assigns, &Assign{Value: w}) + } + } + return ds +} + +func (p *parser) evalClause() *EvalClause { + ec := &EvalClause{Eval: p.pos} + p.next() + ec.Stmt, _ = p.getStmt(false) + return ec +} + +func isBashCompoundCommand(tok Token, val string) bool { + switch tok { + case leftParen, dblLeftParen: + return true + case _LitWord: + switch val { + case "{", "if", "while", "until", "for", "case", "[[", "eval", + "coproc", "let", "function": + return true + } + if bashDeclareWord(val) { + return true + } + } + return false +} + +func (p *parser) coprocClause() *CoprocClause { + cc := &CoprocClause{Coproc: p.pos} + p.next() + if isBashCompoundCommand(p.tok, p.val) { + // has no name + cc.Stmt, _ = p.getStmt(false) + return cc + } + if p.newLine { + p.posErr(cc.Coproc, "coproc clause requires a command") + } + var l Lit + if p.gotLit(&l) { + cc.Name = &l + } + cc.Stmt, _ = p.getStmt(false) + if cc.Stmt == nil { + if cc.Name == nil { + p.posErr(cc.Coproc, "coproc clause requires a command") + return nil + } + // name was in fact the stmt + cc.Stmt = &Stmt{ + Position: cc.Name.ValuePos, + Cmd: &CallExpr{Args: []Word{ + {Parts: p.singleWps(cc.Name)}, + }}, + } + cc.Name = nil + } else if cc.Name != nil { + if call, ok := cc.Stmt.Cmd.(*CallExpr); ok { + // name was in fact the start of a call + call.Args = append([]Word{{Parts: p.singleWps(cc.Name)}}, + call.Args...) 
+ cc.Name = nil + } + } + return cc +} + +func (p *parser) letClause() *LetClause { + lc := &LetClause{Let: p.pos} + old := p.preNested(arithmExprLet) + p.next() + for !p.newLine && !stopToken(p.tok) && !p.peekRedir() { + x := p.arithmExpr(illegalTok, lc.Let, 0, true) + if x == nil { + break + } + lc.Exprs = append(lc.Exprs, x) + } + if len(lc.Exprs) == 0 { + p.posErr(lc.Let, "let clause requires at least one expression") + } + p.postNested(old) + if p.tok == illegalTok { + p.next() + } + return lc +} + +func (p *parser) bashFuncDecl() *FuncDecl { + fpos := p.pos + p.next() + if p.tok != _LitWord { + if w := p.followWord("function", fpos); p.err == nil { + rawName := string(p.src[w.Pos()-1 : w.End()-1]) + p.posErr(w.Pos(), "invalid func name: %q", rawName) + } + } + name := Lit{ValuePos: p.pos, Value: p.val} + p.next() + if p.gotSameLine(leftParen) { + p.follow(name.ValuePos, "foo(", rightParen) + } + return p.funcDecl(name, fpos) +} + +func (p *parser) callExpr(s *Stmt, w Word) *CallExpr { + alloc := &struct { + ce CallExpr + ws [4]Word + }{} + ce := &alloc.ce + ce.Args = alloc.ws[:1] + ce.Args[0] = w + for !p.newLine { + switch p.tok { + case _EOF, semicolon, And, Or, AndExpr, OrExpr, pipeAll, + dblSemicolon, semiFall, dblSemiFall: + return ce + case _LitWord: + if litRedir(p.src, p.npos) { + p.doRedirect(s) + continue + } + ce.Args = append(ce.Args, Word{ + Parts: p.singleWps(p.lit(p.pos, p.val)), + }) + p.next() + case bckQuote: + if p.quote == subCmdBckquo { + return ce + } + fallthrough + case _Lit, dollBrace, dollDblParen, dollParen, dollar, cmdIn, cmdOut, + sglQuote, dollSglQuote, dblQuote, dollDblQuote, dollBrack, globQuest, + globMul, globAdd, globAt, globNot: + ce.Args = append(ce.Args, Word{Parts: p.wordParts()}) + case Gtr, Shr, Lss, dplIn, dplOut, clbOut, rdrInOut, Shl, + dashHdoc, wordHdoc, rdrAll, appAll: + p.doRedirect(s) + case rightParen: + if p.quote == subCmd { + return ce + } + fallthrough + default: + p.curErr("a command can only contain words and redirects") + } + } + return ce +} + +func (p *parser) funcDecl(name Lit, pos Pos) *FuncDecl { + fd := &FuncDecl{ + Position: pos, + BashStyle: pos != name.ValuePos, + Name: name, + } + if fd.Body, _ = p.getStmt(false); fd.Body == nil { + p.followErr(fd.Pos(), "foo()", "a statement") + } + return fd +} diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/printer.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/printer.go new file mode 100644 index 0000000000..4141f054d7 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/printer.go @@ -0,0 +1,1147 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package syntax + +import ( + "bufio" + "io" + "sync" +) + +// PrintConfig controls how the printing of an AST node will behave. +type PrintConfig struct { + Spaces int // 0 (default) for tabs, >0 for number of spaces +} + +var printerFree = sync.Pool{ + New: func() interface{} { + return &printer{bufWriter: bufio.NewWriter(nil)} + }, +} + +// Fprint "pretty-prints" the given AST file to the given writer. +func (c PrintConfig) Fprint(w io.Writer, f *File) error { + p := printerFree.Get().(*printer) + p.reset() + p.f, p.c = f, c + p.comments = f.Comments + p.bufWriter.Reset(w) + p.stmts(f.Stmts) + p.commentsUpTo(0) + p.newline(0) + err := p.bufWriter.Flush() + printerFree.Put(p) + return err +} + +// Fprint "pretty-prints" the given AST file to the given writer. 
It +// calls PrintConfig.Fprint with its default settings. +func Fprint(w io.Writer, f *File) error { + return PrintConfig{}.Fprint(w, f) +} + +type bufWriter interface { + WriteByte(byte) error + WriteString(string) (int, error) + Reset(io.Writer) + Flush() error +} + +type printer struct { + bufWriter + + f *File + c PrintConfig + + wantSpace bool + wantNewline bool + wroteSemi bool + + commentPadding int + + // nline is the position of the next newline + nline Pos + nlineIndex int + + // lastLevel is the last level of indentation that was used. + lastLevel int + // level is the current level of indentation. + level int + // levelIncs records which indentation level increments actually + // took place, to revert them once their section ends. + levelIncs []bool + + nestedBinary bool + + // comments is the list of pending comments to write. + comments []*Comment + + // pendingHdocs is the list of pending heredocs to write. + pendingHdocs []*Redirect + + // used in stmtLen to align comments + lenPrinter *printer + lenCounter byteCounter +} + +func (p *printer) reset() { + p.wantSpace, p.wantNewline = false, false + p.commentPadding = 0 + p.nline, p.nlineIndex = 0, 0 + p.lastLevel, p.level = 0, 0 + p.levelIncs = p.levelIncs[:0] + p.nestedBinary = false + p.pendingHdocs = p.pendingHdocs[:0] +} + +func (p *printer) incLine() { + if p.nlineIndex++; p.nlineIndex >= len(p.f.Lines) { + p.nline = maxPos + } else { + p.nline = Pos(p.f.Lines[p.nlineIndex]) + } +} + +func (p *printer) incLines(pos Pos) { + for p.nline < pos { + p.incLine() + } +} + +func (p *printer) space() { + p.WriteByte(' ') + p.wantSpace = false +} + +func (p *printer) spaces(n int) { + for i := 0; i < n; i++ { + p.WriteByte(' ') + } +} + +func (p *printer) tabs(n int) { + for i := 0; i < n; i++ { + p.WriteByte('\t') + } +} + +func (p *printer) bslashNewl() { + p.WriteString(" \\\n") + p.wantSpace = false + p.incLine() +} + +func (p *printer) spacedString(s string, spaceAfter bool) { + if p.wantSpace { + p.WriteByte(' ') + } + p.WriteString(s) + p.wantSpace = spaceAfter +} + +func (p *printer) semiOrNewl(s string, pos Pos) { + if p.wantNewline { + p.newline(pos) + p.indent() + } else { + if !p.wroteSemi { + p.WriteByte(';') + } + p.WriteByte(' ') + } + p.incLines(pos) + p.WriteString(s) + p.wantSpace = true +} + +func (p *printer) incLevel() { + inc := false + if p.level <= p.lastLevel { + p.level++ + inc = true + } else if last := &p.levelIncs[len(p.levelIncs)-1]; *last { + *last = false + inc = true + } + p.levelIncs = append(p.levelIncs, inc) +} + +func (p *printer) decLevel() { + if p.levelIncs[len(p.levelIncs)-1] { + p.level-- + } + p.levelIncs = p.levelIncs[:len(p.levelIncs)-1] +} + +func (p *printer) indent() { + p.lastLevel = p.level + switch { + case p.level == 0: + case p.c.Spaces == 0: + p.tabs(p.level) + case p.c.Spaces > 0: + p.spaces(p.c.Spaces * p.level) + } +} + +func (p *printer) newline(pos Pos) { + p.wantNewline, p.wantSpace = false, false + p.WriteByte('\n') + if pos > p.nline { + p.incLine() + } + hdocs := p.pendingHdocs + p.pendingHdocs = p.pendingHdocs[:0] + for _, r := range hdocs { + p.word(r.Hdoc) + p.incLines(r.Hdoc.End() + 1) + p.unquotedWord(r.Word) + p.WriteByte('\n') + p.incLine() + p.wantSpace = false + } +} + +func (p *printer) newlines(pos Pos) { + p.newline(pos) + if pos > p.nline { + // preserve single empty lines + p.WriteByte('\n') + p.incLine() + } + p.indent() +} + +func (p *printer) commentsAndSeparate(pos Pos) { + p.commentsUpTo(pos) + if p.wantNewline || pos > p.nline { + p.newlines(pos) 
+ } +} + +func (p *printer) sepTok(s string, pos Pos) { + p.level++ + p.commentsUpTo(pos) + p.level-- + if p.wantNewline || pos > p.nline { + p.newlines(pos) + } + p.WriteString(s) + p.wantSpace = true +} + +func (p *printer) semiRsrv(s string, pos Pos, fallback bool) { + p.level++ + p.commentsUpTo(pos) + p.level-- + if p.wantNewline || pos > p.nline { + p.newlines(pos) + } else if fallback { + if !p.wroteSemi { + p.WriteByte(';') + } + p.WriteByte(' ') + } else if p.wantSpace { + p.WriteByte(' ') + } + p.WriteString(s) + p.wantSpace = true +} + +func (p *printer) commentsUpTo(pos Pos) { + if len(p.comments) < 1 { + return + } + c := p.comments[0] + if pos > 0 && c.Hash >= pos { + return + } + p.comments = p.comments[1:] + switch { + case p.nlineIndex == 0: + case c.Hash >= p.nline: + p.newlines(c.Hash) + default: + p.spaces(p.commentPadding + 1) + } + p.incLines(c.Hash) + p.WriteByte('#') + p.WriteString(c.Text) + p.commentsUpTo(pos) +} + +func (p *printer) expansionOp(op ParExpOperator) { + switch op { + case SubstAdd: + p.WriteByte('+') + case SubstColAdd: + p.WriteString(":+") + case SubstSub: + p.WriteByte('-') + case SubstColSub: + p.WriteString(":-") + case SubstQuest: + p.WriteByte('?') + case SubstColQuest: + p.WriteString(":?") + case SubstAssgn: + p.WriteByte('=') + case SubstColAssgn: + p.WriteString(":=") + case RemSmallSuffix: + p.WriteByte('%') + case RemLargeSuffix: + p.WriteString("%%") + case RemSmallPrefix: + p.WriteByte('#') + case RemLargePrefix: + p.WriteString("##") + case UpperFirst: + p.WriteByte('^') + case UpperAll: + p.WriteString("^^") + case LowerFirst: + p.WriteByte(',') + default: // LowerAll + p.WriteString(",,") + } +} + +func (p *printer) wordPart(wp WordPart) { + switch x := wp.(type) { + case *Lit: + p.WriteString(x.Value) + case *SglQuoted: + if x.Dollar { + p.WriteByte('$') + } + p.WriteByte('\'') + p.WriteString(x.Value) + p.WriteByte('\'') + p.incLines(x.End()) + case *DblQuoted: + if x.Dollar { + p.WriteByte('$') + } + p.WriteByte('"') + for i, n := range x.Parts { + p.wordPart(n) + if i == len(x.Parts)-1 { + p.incLines(n.End()) + } + } + p.WriteByte('"') + case *CmdSubst: + p.incLines(x.Pos()) + p.WriteString("$(") + p.wantSpace = len(x.Stmts) > 0 && startsWithLparen(x.Stmts[0]) + p.nestedStmts(x.Stmts, x.Right) + p.sepTok(")", x.Right) + case *ParamExp: + if x.Short { + p.WriteByte('$') + p.WriteString(x.Param.Value) + break + } + p.WriteString("${") + if x.Length { + p.WriteByte('#') + } + p.WriteString(x.Param.Value) + if x.Ind != nil { + p.WriteByte('[') + p.word(x.Ind.Word) + p.WriteByte(']') + } + if x.Slice != nil { + p.WriteByte(':') + p.word(x.Slice.Offset) + if w2 := x.Slice.Length; w2.Parts != nil { + p.WriteByte(':') + p.word(w2) + } + } else if x.Repl != nil { + if x.Repl.All { + p.WriteByte('/') + } + p.WriteByte('/') + p.word(x.Repl.Orig) + p.WriteByte('/') + p.word(x.Repl.With) + } else if x.Exp != nil { + p.expansionOp(x.Exp.Op) + p.word(x.Exp.Word) + } + p.WriteByte('}') + case *ArithmExp: + p.WriteString("$((") + p.arithmExpr(x.X, false) + p.WriteString("))") + case *ArrayExpr: + p.wantSpace = false + p.WriteByte('(') + p.wordJoin(x.List, false) + p.sepTok(")", x.Rparen) + case *ExtGlob: + p.wantSpace = false + p.WriteString(x.Op.String()) + p.WriteString(x.Pattern.Value) + p.WriteByte(')') + case *ProcSubst: + // avoid conflict with << and others + if p.wantSpace { + p.space() + } + switch x.Op { + case CmdIn: + p.WriteString("<(") + default: // CmdOut + p.WriteString(">(") + } + p.nestedStmts(x.Stmts, 0) + p.WriteByte(')') + } 
+ p.wantSpace = true +} + +func (p *printer) loop(loop Loop) { + switch x := loop.(type) { + case *WordIter: + p.WriteString(x.Name.Value) + if len(x.List) > 0 { + p.WriteString(" in") + p.wordJoin(x.List, true) + } + case *CStyleLoop: + p.WriteString("((") + if x.Init == nil { + p.WriteByte(' ') + } + p.arithmExpr(x.Init, false) + p.WriteString("; ") + p.arithmExpr(x.Cond, false) + p.WriteString("; ") + p.arithmExpr(x.Post, false) + p.WriteString("))") + } +} + +func (p *printer) binaryExprOp(tok Token) { + switch tok { + case Assgn: + p.WriteByte('=') + case Add: + p.WriteByte('+') + case Sub: + p.WriteByte('-') + case Rem: + p.WriteByte('%') + case Mul: + p.WriteByte('*') + case Quo: + p.WriteByte('/') + case And: + p.WriteByte('&') + case Or: + p.WriteByte('|') + case AndExpr: + p.WriteString("&&") + case OrExpr: + p.WriteString("||") + case Xor: + p.WriteByte('^') + case Pow: + p.WriteString("**") + case Eql: + p.WriteString("==") + case Neq: + p.WriteString("!=") + case Leq: + p.WriteString("<=") + case Geq: + p.WriteString(">=") + case AddAssgn: + p.WriteString("+=") + case SubAssgn: + p.WriteString("-=") + case MulAssgn: + p.WriteString("*=") + case QuoAssgn: + p.WriteString("/=") + case RemAssgn: + p.WriteString("%=") + case AndAssgn: + p.WriteString("&=") + case OrAssgn: + p.WriteString("|=") + case XorAssgn: + p.WriteString("^=") + case ShlAssgn: + p.WriteString("<<=") + case ShrAssgn: + p.WriteString(">>=") + case Lss: + p.WriteByte('<') + case Gtr: + p.WriteByte('>') + case Shl: + p.WriteString("<<") + case Shr: + p.WriteString(">>") + case Quest: + p.WriteByte('?') + case Colon: + p.WriteByte(':') + default: // Comma + p.WriteByte(',') + } +} + +func (p *printer) unaryExprOp(tok Token) { + switch tok { + case Add: + p.WriteByte('+') + case Sub: + p.WriteByte('-') + case Not: + p.WriteByte('!') + case Inc: + p.WriteString("++") + default: // Dec + p.WriteString("--") + } +} + +func (p *printer) arithmExpr(expr ArithmExpr, compact bool) { + p.wantSpace = false + switch x := expr.(type) { + case *Word: + p.word(*x) + case *BinaryArithm: + if compact { + p.arithmExpr(x.X, compact) + p.binaryExprOp(x.Op) + p.arithmExpr(x.Y, compact) + } else { + p.arithmExpr(x.X, compact) + if x.Op != Comma { + p.WriteByte(' ') + } + p.binaryExprOp(x.Op) + p.space() + p.arithmExpr(x.Y, compact) + } + case *UnaryArithm: + if x.Post { + p.arithmExpr(x.X, compact) + p.unaryExprOp(x.Op) + } else { + p.unaryExprOp(x.Op) + p.arithmExpr(x.X, compact) + } + case *ParenArithm: + p.WriteByte('(') + p.arithmExpr(x.X, false) + p.WriteByte(')') + } +} + +func (p *printer) unaryTestOp(op UnTestOperator) { + switch op { + case TsNot: + p.WriteByte('!') + case TsExists: + p.WriteString("-e") + case TsRegFile: + p.WriteString("-f") + case TsDirect: + p.WriteString("-d") + case TsCharSp: + p.WriteString("-c") + case TsBlckSp: + p.WriteString("-b") + case TsNmPipe: + p.WriteString("-p") + case TsSocket: + p.WriteString("-S") + case TsSmbLink: + p.WriteString("-L") + case TsGIDSet: + p.WriteString("-g") + case TsUIDSet: + p.WriteString("-u") + case TsRead: + p.WriteString("-r") + case TsWrite: + p.WriteString("-w") + case TsExec: + p.WriteString("-x") + case TsNoEmpty: + p.WriteString("-s") + case TsFdTerm: + p.WriteString("-t") + case TsEmpStr: + p.WriteString("-z") + case TsNempStr: + p.WriteString("-n") + case TsOptSet: + p.WriteString("-o") + case TsVarSet: + p.WriteString("-v") + default: // TsRefVar + p.WriteString("-R") + } +} + +func (p *printer) binaryTestOp(op BinTestOperator) { + switch op { + case 
AndTest: + p.WriteString("&&") + case OrTest: + p.WriteString("||") + case TsAssgn: + p.WriteByte('=') + case TsEqual: + p.WriteString("==") + case TsNequal: + p.WriteString("!=") + case TsReMatch: + p.WriteString("=~") + case TsNewer: + p.WriteString("-nt") + case TsOlder: + p.WriteString("-ot") + case TsDevIno: + p.WriteString("-ef") + case TsEql: + p.WriteString("-eq") + case TsNeq: + p.WriteString("-ne") + case TsLeq: + p.WriteString("-le") + case TsGeq: + p.WriteString("-ge") + case TsLss: + p.WriteString("-lt") + case TsGtr: + p.WriteString("-gt") + case TsBefore: + p.WriteByte('<') + case TsAfter: + p.WriteByte('>') + } +} + +func (p *printer) testExpr(expr TestExpr) { + p.wantSpace = false + switch x := expr.(type) { + case *Word: + p.word(*x) + case *BinaryTest: + p.testExpr(x.X) + p.space() + p.binaryTestOp(x.Op) + p.space() + p.testExpr(x.Y) + case *UnaryTest: + p.unaryTestOp(x.Op) + p.space() + p.testExpr(x.X) + case *ParenTest: + p.WriteByte('(') + p.testExpr(x.X) + p.WriteByte(')') + } +} + +func (p *printer) word(w Word) { + for _, n := range w.Parts { + p.wordPart(n) + } +} + +func (p *printer) unquotedWord(w Word) { + for _, wp := range w.Parts { + switch x := wp.(type) { + case *SglQuoted: + p.WriteString(x.Value) + case *DblQuoted: + for _, qp := range x.Parts { + p.wordPart(qp) + } + case *Lit: + if x.Value[0] == '\\' { + p.WriteString(x.Value[1:]) + } else { + p.WriteString(x.Value) + } + default: + p.wordPart(wp) + } + } +} + +func (p *printer) wordJoin(ws []Word, backslash bool) { + anyNewline := false + for _, w := range ws { + if pos := w.Pos(); pos > p.nline { + p.commentsUpTo(pos) + if backslash { + p.bslashNewl() + } else { + p.WriteByte('\n') + p.incLine() + } + if !anyNewline { + p.incLevel() + anyNewline = true + } + p.indent() + } else if p.wantSpace { + p.space() + } + for _, n := range w.Parts { + p.wordPart(n) + } + } + if anyNewline { + p.decLevel() + } +} + +func (p *printer) stmt(s *Stmt) { + if s.Negated { + p.spacedString("!", true) + } + p.assigns(s.Assigns) + startRedirs := p.command(s.Cmd, s.Redirs) + anyNewline := false + for _, r := range s.Redirs[startRedirs:] { + if r.OpPos > p.nline { + p.bslashNewl() + if !anyNewline { + p.incLevel() + anyNewline = true + } + p.indent() + } + p.commentsAndSeparate(r.OpPos) + if p.wantSpace { + p.WriteByte(' ') + } + if r.N != nil { + p.WriteString(r.N.Value) + } + p.redirectOp(r.Op) + p.wantSpace = true + p.word(r.Word) + if r.Op == Hdoc || r.Op == DashHdoc { + p.pendingHdocs = append(p.pendingHdocs, r) + } + } + p.wroteSemi = false + if s.SemiPos > 0 && s.SemiPos > p.nline { + p.incLevel() + p.bslashNewl() + p.indent() + p.decLevel() + p.WriteByte(';') + p.wroteSemi = true + } else if s.Background { + p.WriteString(" &") + } + if anyNewline { + p.decLevel() + } +} + +func (p *printer) redirectOp(op RedirOperator) { + switch op { + case RdrIn: + p.WriteByte('<') + case RdrOut: + p.WriteByte('>') + case Hdoc: + p.WriteString("<<") + case AppOut: + p.WriteString(">>") + case RdrInOut: + p.WriteString("<>") + case DplIn: + p.WriteString("<&") + case DplOut: + p.WriteString(">&") + case ClbOut: + p.WriteString(">|") + case DashHdoc: + p.WriteString("<<-") + case WordHdoc: + p.WriteString("<<<") + case RdrAll: + p.WriteString("&>") + default: // AppAll + p.WriteString("&>>") + } +} + +func binaryCmdOp(op BinCmdOperator) string { + switch op { + case AndStmt: + return "&&" + case OrStmt: + return "||" + case Pipe: + return "|" + default: // PipeAll + return "|&" + } +} + +func caseClauseOp(op CaseOperator) string 
{ + switch op { + case DblSemicolon: + return ";;" + case SemiFall: + return ";&" + default: // DblSemiFall + return ";;&" + } +} + +func (p *printer) command(cmd Command, redirs []*Redirect) (startRedirs int) { + switch x := cmd.(type) { + case *CallExpr: + if len(x.Args) <= 1 { + p.wordJoin(x.Args, true) + return 0 + } + p.wordJoin(x.Args[:1], true) + for _, r := range redirs { + if r.Pos() > x.Args[1].Pos() || r.Op == Hdoc || r.Op == DashHdoc { + break + } + if p.wantSpace { + p.space() + } + if r.N != nil { + p.WriteString(r.N.Value) + } + p.redirectOp(r.Op) + p.wantSpace = true + p.word(r.Word) + startRedirs++ + } + p.wordJoin(x.Args[1:], true) + case *Block: + p.spacedString("{", true) + p.nestedStmts(x.Stmts, x.Rbrace) + p.semiRsrv("}", x.Rbrace, true) + case *IfClause: + p.spacedString("if", true) + p.nestedStmts(x.CondStmts, 0) + p.semiOrNewl("then", x.Then) + p.nestedStmts(x.ThenStmts, 0) + for _, el := range x.Elifs { + p.semiRsrv("elif", el.Elif, true) + p.nestedStmts(el.CondStmts, 0) + p.semiOrNewl("then", el.Then) + p.nestedStmts(el.ThenStmts, 0) + } + if len(x.ElseStmts) > 0 { + p.semiRsrv("else", x.Else, true) + p.nestedStmts(x.ElseStmts, 0) + } else if x.Else > 0 { + p.incLines(x.Else) + } + p.semiRsrv("fi", x.Fi, true) + case *Subshell: + p.spacedString("(", false) + p.wantSpace = len(x.Stmts) > 0 && startsWithLparen(x.Stmts[0]) + p.nestedStmts(x.Stmts, x.Rparen) + p.sepTok(")", x.Rparen) + case *WhileClause: + p.spacedString("while", true) + p.nestedStmts(x.CondStmts, 0) + p.semiOrNewl("do", x.Do) + p.nestedStmts(x.DoStmts, 0) + p.semiRsrv("done", x.Done, true) + case *ForClause: + p.spacedString("for ", true) + p.loop(x.Loop) + p.semiOrNewl("do", x.Do) + p.nestedStmts(x.DoStmts, 0) + p.semiRsrv("done", x.Done, true) + case *BinaryCmd: + p.stmt(x.X) + indent := !p.nestedBinary + if indent { + p.incLevel() + } + _, p.nestedBinary = x.Y.Cmd.(*BinaryCmd) + if len(p.pendingHdocs) == 0 && x.Y.Pos() > p.nline { + p.bslashNewl() + p.indent() + } + p.spacedString(binaryCmdOp(x.Op), true) + p.incLines(x.Y.Pos()) + p.stmt(x.Y) + if indent { + p.decLevel() + } + p.nestedBinary = false + case *FuncDecl: + if x.BashStyle { + p.WriteString("function ") + } + p.WriteString(x.Name.Value) + p.WriteString("() ") + p.incLines(x.Body.Pos()) + p.stmt(x.Body) + case *CaseClause: + p.spacedString("case ", true) + p.word(x.Word) + p.WriteString(" in") + p.incLevel() + for _, pl := range x.List { + p.commentsAndSeparate(pl.Patterns[0].Pos()) + for i, w := range pl.Patterns { + if i > 0 { + p.spacedString("|", true) + } + if p.wantSpace { + p.WriteByte(' ') + } + for _, n := range w.Parts { + p.wordPart(n) + } + } + p.WriteByte(')') + sep := len(pl.Stmts) > 1 || (len(pl.Stmts) > 0 && pl.Stmts[0].Pos() > p.nline) + p.nestedStmts(pl.Stmts, 0) + p.level++ + if sep { + p.sepTok(caseClauseOp(pl.Op), pl.OpPos) + } else { + p.spacedString(caseClauseOp(pl.Op), true) + } + p.incLines(pl.OpPos) + p.level-- + if sep || pl.OpPos == x.Esac { + p.wantNewline = true + } + } + p.decLevel() + p.semiRsrv("esac", x.Esac, len(x.List) == 0) + case *UntilClause: + p.spacedString("until", true) + p.nestedStmts(x.CondStmts, 0) + p.semiOrNewl("do", x.Do) + p.nestedStmts(x.DoStmts, 0) + p.semiRsrv("done", x.Done, true) + case *ArithmCmd: + if p.wantSpace { + p.space() + } + p.WriteString("((") + p.arithmExpr(x.X, false) + p.WriteString("))") + case *TestClause: + p.spacedString("[[", true) + p.space() + p.testExpr(x.X) + p.spacedString("]]", true) + case *DeclClause: + name := x.Variant + if name == "" { + name = 
"declare" + } + p.spacedString(name, true) + for _, w := range x.Opts { + p.WriteByte(' ') + p.word(w) + } + p.assigns(x.Assigns) + case *EvalClause: + p.spacedString("eval", true) + if x.Stmt != nil { + p.stmt(x.Stmt) + } + case *CoprocClause: + p.spacedString("coproc", true) + if x.Name != nil { + p.WriteByte(' ') + p.WriteString(x.Name.Value) + } + p.stmt(x.Stmt) + case *LetClause: + p.spacedString("let", true) + for _, n := range x.Exprs { + p.space() + p.arithmExpr(n, true) + } + } + return startRedirs +} + +func startsWithLparen(s *Stmt) bool { + switch x := s.Cmd.(type) { + case *Subshell: + return true + case *BinaryCmd: + return startsWithLparen(x.X) + } + return false +} + +func (p *printer) hasInline(pos, npos, nline Pos) bool { + for _, c := range p.comments { + if c.Hash > nline { + return false + } + if c.Hash > pos && (npos == 0 || c.Hash < npos) { + return true + } + } + return false +} + +func (p *printer) stmts(stmts []*Stmt) { + switch len(stmts) { + case 0: + return + case 1: + s := stmts[0] + pos := s.Pos() + p.commentsUpTo(pos) + if pos <= p.nline { + p.stmt(s) + } else { + if p.nlineIndex > 0 { + p.newlines(pos) + } else { + p.incLines(pos) + } + p.stmt(s) + p.wantNewline = true + } + return + } + inlineIndent := 0 + for i, s := range stmts { + pos := s.Pos() + ind := p.nlineIndex + p.commentsUpTo(pos) + if p.nlineIndex > 0 { + p.newlines(pos) + } + p.incLines(pos) + p.stmt(s) + var npos Pos + if i+1 < len(stmts) { + npos = stmts[i+1].Pos() + } + if !p.hasInline(pos, npos, p.nline) { + inlineIndent = 0 + p.commentPadding = 0 + continue + } + if ind < len(p.f.Lines)-1 && s.End() > Pos(p.f.Lines[ind+1]) { + inlineIndent = 0 + } + if inlineIndent == 0 { + ind2 := p.nlineIndex + nline2 := p.nline + follow := stmts[i:] + for j, s2 := range follow { + pos2 := s2.Pos() + var npos2 Pos + if j+1 < len(follow) { + npos2 = follow[j+1].Pos() + } + if pos2 > nline2 || !p.hasInline(pos2, npos2, nline2) { + break + } + if l := p.stmtLen(s2); l > inlineIndent { + inlineIndent = l + } + if ind2++; ind2 >= len(p.f.Lines) { + nline2 = maxPos + } else { + nline2 = Pos(p.f.Lines[ind2]) + } + } + if ind2 == p.nlineIndex+1 { + // no inline comments directly after this one + continue + } + } + if inlineIndent > 0 { + p.commentPadding = inlineIndent - p.stmtLen(s) + } + } + p.wantNewline = true +} + +type byteCounter int + +func (c *byteCounter) WriteByte(b byte) error { + *c++ + return nil +} +func (c *byteCounter) WriteString(s string) (int, error) { + *c += byteCounter(len(s)) + return 0, nil +} +func (c *byteCounter) Reset(io.Writer) { *c = 0 } +func (c *byteCounter) Flush() error { return nil } + +func (p *printer) stmtLen(s *Stmt) int { + if p.lenPrinter == nil { + p.lenPrinter = new(printer) + } + *p.lenPrinter = printer{bufWriter: &p.lenCounter} + p.lenPrinter.bufWriter.Reset(nil) + p.lenPrinter.f = p.f + p.lenPrinter.incLines(s.Pos()) + p.lenPrinter.stmt(s) + return int(p.lenCounter) +} + +func (p *printer) nestedStmts(stmts []*Stmt, closing Pos) { + p.incLevel() + if len(stmts) == 1 && closing > p.nline && stmts[0].End() <= p.nline { + p.newline(0) + p.indent() + } + p.stmts(stmts) + p.decLevel() +} + +func (p *printer) assigns(assigns []*Assign) { + anyNewline := false + for _, a := range assigns { + if a.Pos() > p.nline { + p.bslashNewl() + if !anyNewline { + p.incLevel() + anyNewline = true + } + p.indent() + } else if p.wantSpace { + p.space() + } + if a.Name != nil { + p.WriteString(a.Name.Value) + if a.Append { + p.WriteByte('+') + } + p.WriteByte('=') + } + p.word(a.Value) + 
p.wantSpace = true + } + if anyNewline { + p.decLevel() + } +} diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/tokens.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/tokens.go new file mode 100644 index 0000000000..dee108a259 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/tokens.go @@ -0,0 +1,423 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package syntax + +// Token is the set of lexical tokens and reserved words. +type Token int + +// The list of all possible tokens and reserved words. +const ( + illegalTok Token = iota + _EOF + _Lit + _LitWord + + sglQuote // ' + dblQuote // " + bckQuote // ` + + And // & + AndExpr // && + OrExpr // || + Or // | + pipeAll // |& - bash + + dollar // $ + dollSglQuote // $' - bash + dollDblQuote // $" - bash + dollBrace // ${ + dollBrack // $[ + dollParen // $( + dollDblParen // $(( + leftBrack // [ + leftParen // ( + dblLeftParen // (( - bash + + rightBrace // } + rightBrack // ] + rightParen // ) + dblRightParen // )) + semicolon // ; + + dblSemicolon // ;; + semiFall // ;& - bash + dblSemiFall // ;;& - bash + + Mul // * + Not // ! + Inc // ++ + Dec // -- + Pow // ** + Eql // == + Neq // != + Leq // <= + Geq // >= + + AddAssgn // += + SubAssgn // -= + MulAssgn // *= + QuoAssgn // /= + RemAssgn // %= + AndAssgn // &= + OrAssgn // |= + XorAssgn // ^= + ShlAssgn // <<= + ShrAssgn // >>= + + Gtr // > + Shr // >> + Lss // < + rdrInOut // <> + dplIn // <& + dplOut // >& + clbOut // >| + Shl // << + dashHdoc // <<- + wordHdoc // <<< - bash + rdrAll // &> - bash + appAll // &>> - bash + + cmdIn // <( - bash + cmdOut // >( - bash + + Add // + + ColAdd // :+ + Sub // - + ColSub // :- + Quest // ? + ColQuest // :? + Assgn // = + ColAssgn // := + Rem // % + dblRem // %% + Hash // # + dblHash // ## + Xor // ^ + dblXor // ^^ - bash + Comma // , + dblComma // ,, - bash + Quo // / + dblQuo // // + Colon // : + + tsNot // ! 
+ tsExists // -e + tsRegFile // -f + tsDirect // -d + tsCharSp // -c + tsBlckSp // -b + tsNmPipe // -p + tsSocket // -S + tsSmbLink // -L + tsGIDSet // -g + tsUIDSet // -u + tsRead // -r + tsWrite // -w + tsExec // -x + tsNoEmpty // -s + tsFdTerm // -t + tsEmpStr // -z + tsNempStr // -n + tsOptSet // -o + tsVarSet // -v + tsRefVar // -R + + tsReMatch // =~ + tsNewer // -nt + tsOlder // -ot + tsDevIno // -ef + tsEql // -eq + tsNeq // -ne + tsLeq // -le + tsGeq // -ge + tsLss // -lt + tsGtr // -gt + + globQuest // ?( + globMul // *( + globAdd // +( + globAt // @( + globNot // !( +) + +type RedirOperator Token + +const ( + RdrOut = RedirOperator(Gtr) + iota + AppOut + RdrIn + RdrInOut + DplIn + DplOut + ClbOut + Hdoc + DashHdoc + WordHdoc + RdrAll + AppAll +) + +type ProcOperator Token + +const ( + CmdIn = ProcOperator(cmdIn) + iota + CmdOut +) + +type GlobOperator Token + +const ( + GlobQuest = GlobOperator(globQuest) + iota + GlobMul + GlobAdd + GlobAt + GlobNot +) + +type BinCmdOperator Token + +const ( + AndStmt = BinCmdOperator(AndExpr) + iota + OrStmt + Pipe + PipeAll +) + +type CaseOperator Token + +const ( + DblSemicolon = CaseOperator(dblSemicolon) + iota + SemiFall + DblSemiFall +) + +type ParExpOperator Token + +const ( + SubstAdd = ParExpOperator(Add) + iota + SubstColAdd + SubstSub + SubstColSub + SubstQuest + SubstColQuest + SubstAssgn + SubstColAssgn + RemSmallSuffix + RemLargeSuffix + RemSmallPrefix + RemLargePrefix + UpperFirst + UpperAll + LowerFirst + LowerAll +) + +type UnTestOperator Token + +const ( + TsNot = UnTestOperator(tsNot) + iota + TsExists + TsRegFile + TsDirect + TsCharSp + TsBlckSp + TsNmPipe + TsSocket + TsSmbLink + TsGIDSet + TsUIDSet + TsRead + TsWrite + TsExec + TsNoEmpty + TsFdTerm + TsEmpStr + TsNempStr + TsOptSet + TsVarSet + TsRefVar +) + +type BinTestOperator Token + +const ( + TsReMatch = BinTestOperator(tsReMatch) + iota + TsNewer + TsOlder + TsDevIno + TsEql + TsNeq + TsLeq + TsGeq + TsLss + TsGtr + AndTest = BinTestOperator(AndExpr) + OrTest = BinTestOperator(OrExpr) + TsAssgn = BinTestOperator(Assgn) + TsEqual = BinTestOperator(Eql) + TsNequal = BinTestOperator(Neq) + TsBefore = BinTestOperator(Lss) + TsAfter = BinTestOperator(Gtr) +) + +func (o RedirOperator) String() string { return Token(o).String() } +func (o ProcOperator) String() string { return Token(o).String() } +func (o GlobOperator) String() string { return Token(o).String() } +func (o BinCmdOperator) String() string { return Token(o).String() } +func (o CaseOperator) String() string { return Token(o).String() } +func (o ParExpOperator) String() string { return Token(o).String() } +func (o UnTestOperator) String() string { return Token(o).String() } +func (o BinTestOperator) String() string { return Token(o).String() } + +// Pos is the internal representation of a position within a source +// file. +type Pos int + +var defaultPos Pos + +const maxPos = Pos(^uint(0) >> 1) + +// Position describes a position within a source file including the line +// and column location. A Position is valid if the line number is > 0. 
+type Position struct { + Offset int // byte offset, starting at 0 + Line int // line number, starting at 1 + Column int // column number, starting at 1 (in bytes) +} + +var tokNames = map[Token]string{ + illegalTok: "illegal", + _EOF: "EOF", + _Lit: "Lit", + _LitWord: "LitWord", + + sglQuote: "'", + dblQuote: `"`, + bckQuote: "`", + + And: "&", + AndExpr: "&&", + OrExpr: "||", + Or: "|", + pipeAll: "|&", + + dollar: "$", + dollSglQuote: "$'", + dollDblQuote: `$"`, + dollBrace: "${", + dollBrack: "$[", + dollParen: "$(", + dollDblParen: "$((", + leftBrack: "[", + leftParen: "(", + dblLeftParen: "((", + + rightBrace: "}", + rightBrack: "]", + rightParen: ")", + dblRightParen: "))", + semicolon: ";", + + dblSemicolon: ";;", + semiFall: ";&", + dblSemiFall: ";;&", + + Gtr: ">", + Shr: ">>", + Lss: "<", + rdrInOut: "<>", + dplIn: "<&", + dplOut: ">&", + clbOut: ">|", + Shl: "<<", + dashHdoc: "<<-", + wordHdoc: "<<<", + rdrAll: "&>", + appAll: "&>>", + + cmdIn: "<(", + cmdOut: ">(", + + Add: "+", + ColAdd: ":+", + Sub: "-", + ColSub: ":-", + Quest: "?", + ColQuest: ":?", + Assgn: "=", + ColAssgn: ":=", + Rem: "%", + dblRem: "%%", + Hash: "#", + dblHash: "##", + Xor: "^", + dblXor: "^^", + Comma: ",", + dblComma: ",,", + Quo: "/", + dblQuo: "//", + Colon: ":", + + Mul: "*", + Not: "!", + Inc: "++", + Dec: "--", + Pow: "**", + Eql: "==", + Neq: "!=", + Leq: "<=", + Geq: ">=", + + AddAssgn: "+=", + SubAssgn: "-=", + MulAssgn: "*=", + QuoAssgn: "/=", + RemAssgn: "%=", + AndAssgn: "&=", + OrAssgn: "|=", + XorAssgn: "^=", + ShlAssgn: "<<=", + ShrAssgn: ">>=", + + tsNot: "!", + tsExists: "-e", + tsRegFile: "-f", + tsDirect: "-d", + tsCharSp: "-c", + tsBlckSp: "-b", + tsNmPipe: "-p", + tsSocket: "-S", + tsSmbLink: "-L", + tsGIDSet: "-g", + tsUIDSet: "-u", + tsRead: "-r", + tsWrite: "-w", + tsExec: "-x", + tsNoEmpty: "-s", + tsFdTerm: "-t", + tsEmpStr: "-z", + tsNempStr: "-n", + tsOptSet: "-o", + tsVarSet: "-v", + tsRefVar: "-R", + + tsReMatch: "=~", + tsNewer: "-nt", + tsOlder: "-ot", + tsDevIno: "-ef", + tsEql: "-eq", + tsNeq: "-ne", + tsLeq: "-le", + tsGeq: "-ge", + tsLss: "-lt", + tsGtr: "-gt", + + globQuest: "?(", + globMul: "*(", + globAdd: "+(", + globAt: "@(", + globNot: "!(", +} + +func (t Token) String() string { return tokNames[t] } diff --git a/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/walk.go b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/walk.go new file mode 100644 index 0000000000..76d3339086 --- /dev/null +++ b/vendor/github.com/weaveworks/common/tools/vendor/github.com/mvdan/sh/syntax/walk.go @@ -0,0 +1,189 @@ +// Copyright (c) 2016, Daniel Martí +// See LICENSE for licensing information + +package syntax + +import "fmt" + +// Visitor holds a Visit method which is invoked for each node +// encountered by Walk. If the result visitor w is not nil, Walk visits +// each of the children of node with the visitor w, followed by a call +// of w.Visit(nil). +type Visitor interface { + Visit(node Node) (w Visitor) +} + +func walkStmts(v Visitor, stmts []*Stmt) { + for _, s := range stmts { + Walk(v, s) + } +} + +func walkWords(v Visitor, words []Word) { + for i := range words { + Walk(v, &words[i]) + } +} + +// Walk traverses an AST in depth-first order: It starts by calling +// v.Visit(node); node must not be nil. If the visitor w returned by +// v.Visit(node) is not nil, Walk is invoked recursively with visitor w +// for each of the non-nil children of node, followed by a call of +// w.Visit(nil). 
+func Walk(v Visitor, node Node) { + if v = v.Visit(node); v == nil { + return + } + + switch x := node.(type) { + case *File: + walkStmts(v, x.Stmts) + case *Stmt: + if x.Cmd != nil { + Walk(v, x.Cmd) + } + for _, a := range x.Assigns { + Walk(v, a) + } + for _, r := range x.Redirs { + Walk(v, r) + } + case *Assign: + if x.Name != nil { + Walk(v, x.Name) + } + Walk(v, &x.Value) + case *Redirect: + if x.N != nil { + Walk(v, x.N) + } + Walk(v, &x.Word) + if len(x.Hdoc.Parts) > 0 { + Walk(v, &x.Hdoc) + } + case *CallExpr: + walkWords(v, x.Args) + case *Subshell: + walkStmts(v, x.Stmts) + case *Block: + walkStmts(v, x.Stmts) + case *IfClause: + walkStmts(v, x.CondStmts) + walkStmts(v, x.ThenStmts) + for _, elif := range x.Elifs { + walkStmts(v, elif.CondStmts) + walkStmts(v, elif.ThenStmts) + } + walkStmts(v, x.ElseStmts) + case *WhileClause: + walkStmts(v, x.CondStmts) + walkStmts(v, x.DoStmts) + case *UntilClause: + walkStmts(v, x.CondStmts) + walkStmts(v, x.DoStmts) + case *ForClause: + Walk(v, x.Loop) + walkStmts(v, x.DoStmts) + case *WordIter: + Walk(v, &x.Name) + walkWords(v, x.List) + case *CStyleLoop: + if x.Init != nil { + Walk(v, x.Init) + } + if x.Cond != nil { + Walk(v, x.Cond) + } + if x.Post != nil { + Walk(v, x.Post) + } + case *BinaryCmd: + Walk(v, x.X) + Walk(v, x.Y) + case *FuncDecl: + Walk(v, &x.Name) + Walk(v, x.Body) + case *Word: + for _, wp := range x.Parts { + Walk(v, wp) + } + case *Lit: + case *SglQuoted: + case *DblQuoted: + for _, wp := range x.Parts { + Walk(v, wp) + } + case *CmdSubst: + walkStmts(v, x.Stmts) + case *ParamExp: + Walk(v, &x.Param) + if x.Ind != nil { + Walk(v, &x.Ind.Word) + } + if x.Repl != nil { + Walk(v, &x.Repl.Orig) + Walk(v, &x.Repl.With) + } + if x.Exp != nil { + Walk(v, &x.Exp.Word) + } + case *ArithmExp: + if x.X != nil { + Walk(v, x.X) + } + case *ArithmCmd: + if x.X != nil { + Walk(v, x.X) + } + case *BinaryArithm: + Walk(v, x.X) + Walk(v, x.Y) + case *BinaryTest: + Walk(v, x.X) + Walk(v, x.Y) + case *UnaryArithm: + Walk(v, x.X) + case *UnaryTest: + Walk(v, x.X) + case *ParenArithm: + Walk(v, x.X) + case *ParenTest: + Walk(v, x.X) + case *CaseClause: + Walk(v, &x.Word) + for _, pl := range x.List { + walkWords(v, pl.Patterns) + walkStmts(v, pl.Stmts) + } + case *TestClause: + Walk(v, x.X) + case *DeclClause: + walkWords(v, x.Opts) + for _, a := range x.Assigns { + Walk(v, a) + } + case *ArrayExpr: + walkWords(v, x.List) + case *ExtGlob: + Walk(v, &x.Pattern) + case *ProcSubst: + walkStmts(v, x.Stmts) + case *EvalClause: + if x.Stmt != nil { + Walk(v, x.Stmt) + } + case *CoprocClause: + if x.Name != nil { + Walk(v, x.Name) + } + Walk(v, x.Stmt) + case *LetClause: + for _, expr := range x.Exprs { + Walk(v, expr) + } + default: + panic(fmt.Sprintf("ast.Walk: unexpected node type %T", x)) + } + + v.Visit(nil) +} diff --git a/vendor/github.com/weaveworks/docker/pkg/mflag/LICENSE b/vendor/github.com/weaveworks/docker/pkg/mflag/LICENSE new file mode 100644 index 0000000000..8f3fee627a --- /dev/null +++ b/vendor/github.com/weaveworks/docker/pkg/mflag/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
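For orientation, the syntax package vendored above exposes its functionality through Fprint (printer.go), Walk and Visitor (walk.go), and the token and operator tables (tokens.go). The short sketch below shows how a consumer of the upstream package might tie these together; it is illustrative only, it uses the upstream import path rather than this repository's nested vendor path, and it assumes the package's exported Parse function (in the parse.go vendored as part of this change, not shown here) has the signature Parse(src []byte, name string, mode ParseMode) (*File, error).

package main

import (
	"fmt"
	"os"

	"github.com/mvdan/sh/syntax"
)

// countVisitor counts every non-nil node that Walk hands to it.
type countVisitor struct{ nodes int }

// Visit implements syntax.Visitor; returning the visitor itself keeps
// the depth-first walk going into the node's children.
func (v *countVisitor) Visit(node syntax.Node) syntax.Visitor {
	if node != nil {
		v.nodes++
	}
	return v
}

func main() {
	src := []byte("if true; then echo hi; fi\n")

	// Assumed signature: Parse(src []byte, name string, mode ParseMode) (*File, error).
	f, err := syntax.Parse(src, "example.sh", 0)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	// Walk and Visitor are defined in the vendored walk.go above.
	v := &countVisitor{}
	syntax.Walk(v, f)
	fmt.Printf("parsed %d nodes\n", v.nodes)

	// Fprint (printer.go above) pretty-prints the AST back out as shell source.
	if err := syntax.Fprint(os.Stdout, f); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}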
diff --git a/vendor/github.com/weaveworks/docker/pkg/mflag/example/example.go b/vendor/github.com/weaveworks/docker/pkg/mflag/example/example.go new file mode 100644 index 0000000000..263f9dbcd2 --- /dev/null +++ b/vendor/github.com/weaveworks/docker/pkg/mflag/example/example.go @@ -0,0 +1,37 @@ +package main + +import ( + "fmt" + + flag "github.com/docker/docker/pkg/mflag" +) + +var ( + i int + str string + b, b2, h bool +) + +func init() { + flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") + flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") + flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") + flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") + flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") + flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage + flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") + flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") + flag.Parse() +} +func main() { + if h { + flag.PrintDefaults() + } else { + fmt.Printf("s/#hidden/-string: %s\n", str) + fmt.Printf("b: %t\n", b) + fmt.Printf("-bool: %t\n", b2) + fmt.Printf("-integer/-number: %d\n", i) + fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) + fmt.Printf("ARGS: %v\n", flag.Args()) + } +} diff --git a/vendor/github.com/weaveworks/docker/pkg/mflag/flag.go b/vendor/github.com/weaveworks/docker/pkg/mflag/flag.go new file mode 100644 index 0000000000..f13a8cf06c --- /dev/null +++ b/vendor/github.com/weaveworks/docker/pkg/mflag/flag.go @@ -0,0 +1,1280 @@ +// Copyright 2014-2016 The Docker & Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mflag implements command-line flag parsing. +// +// Usage: +// +// Define flags using flag.String(), Bool(), Int(), etc. +// +// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. +// import flag "github.com/docker/docker/pkg/mflag" +// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") +// If you like, you can bind the flag to a variable using the Var() functions. +// var flagvar int +// func init() { +// // -flaghidden will work, but will be hidden from the usage +// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") +// } +// Or you can create custom flags that satisfy the Value interface (with +// pointer receivers) and couple them to flag parsing by +// flag.Var(&flagVal, []string{"name"}, "help message for flagname") +// For such flags, the default value is just the initial value of the variable. +// +// You can also add "deprecated" flags, they are still usable, but are not shown +// in the usage and will display a warning when you try to use them. `#` before +// an option means this option is deprecated, if there is a following option +// without `#` ahead, then that's the replacement, if not, it will just be removed: +// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") +// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or +// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. 
See usage.` +// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") +// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` +// so you can only use `-f`. +// +// You can also group one letter flags, if you declare +// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") +// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") +// you will be able to use the -vs or -sv +// +// After all flags are defined, call +// flag.Parse() +// to parse the command line into the defined flags. +// +// Flags may then be used directly. If you're using the flags themselves, +// they are all pointers; if you bind to variables, they're values. +// fmt.Println("ip has value ", *ip) +// fmt.Println("flagvar has value ", flagvar) +// +// After parsing, the arguments after the flag are available as the +// slice flag.Args() or individually as flag.Arg(i). +// The arguments are indexed from 0 through flag.NArg()-1. +// +// Command line flag syntax: +// -flag +// -flag=x +// -flag="x" +// -flag='x' +// -flag x // non-boolean flags only +// One or two minus signs may be used; they are equivalent. +// The last form is not permitted for boolean flags because the +// meaning of the command +// cmd -x * +// will change if there is a file called 0, false, etc. You must +// use the -flag=false form to turn off a boolean flag. +// +// Flag parsing stops just before the first non-flag argument +// ("-" is a non-flag argument) or after the terminator "--". +// +// Integer flags accept 1234, 0664, 0x1234 and may be negative. +// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. +// Duration flags accept any input valid for time.ParseDuration. +// +// The default set of command-line flags is controlled by +// top-level functions. The FlagSet type allows one to define +// independent sets of flags, such as to implement subcommands +// in a command-line interface. The methods of FlagSet are +// analogous to the top-level functions for the command-line +// flag set. + +package mflag + +import ( + "errors" + "fmt" + "io" + "os" + "runtime" + "sort" + "strconv" + "strings" + "text/tabwriter" + "time" + + "github.com/docker/docker/pkg/homedir" +) + +// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. 
+var ErrHelp = errors.New("flag: help requested") + +// ErrRetry is the error returned if you need to try letter by letter +var ErrRetry = errors.New("flag: retry") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint16 Value +type uint16Value uint16 + +func newUint16Value(val uint16, p *uint16) *uint16Value { + *p = val + return (*uint16Value)(p) +} + +func (i *uint16Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 16) + *i = uint16Value(v) + return err +} + +func (i *uint16Value) Get() interface{} { return uint16(*i) } + +func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s *stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- 
time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how to handle flag parsing errors. +type ErrorHandling int + +// ErrorHandling strategies available when a flag parsing error occurs +const ( + ContinueOnError ErrorHandling = iota + ExitOnError + PanicOnError +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + ShortUsage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use Out() accessor + nArgRequirements []nArgRequirement +} + +// A Flag represents the state of a flag. +type Flag struct { + Names []string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +type flagSlice []string + +func (p flagSlice) Len() int { return len(p) } +func (p flagSlice) Less(i, j int) bool { + pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") + lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) + if lpi != lpj { + return lpi < lpj + } + return pi < pj +} +func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// sortFlags returns the flags as a slice in lexicographical sorted order. +func sortFlags(flags map[string]*Flag) []*Flag { + var list flagSlice + + // The sorted list is based on the first name, when flag map might use the other names. + nameMap := make(map[string]string) + + for n, f := range flags { + fName := strings.TrimPrefix(f.Names[0], "#") + nameMap[fName] = n + if len(f.Names) == 1 { + list = append(list, fName) + continue + } + + found := false + for _, name := range list { + if name == fName { + found = true + break + } + } + if !found { + list = append(list, fName) + } + } + sort.Sort(list) + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[nameMap[name]] + } + return result +} + +// Name returns the name of the FlagSet. 
+func (fs *FlagSet) Name() string { + return fs.name +} + +// Out returns the destination for usage and error messages. +func (fs *FlagSet) Out() io.Writer { + if fs.output == nil { + return os.Stderr + } + return fs.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (fs *FlagSet) SetOutput(output io.Writer) { + fs.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (fs *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(fs.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (fs *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(fs.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (fs *FlagSet) Lookup(name string) *Flag { + return fs.formal[name] +} + +// IsSet indicates whether the specified flag is set in the given FlagSet +func (fs *FlagSet) IsSet(name string) bool { + return fs.actual[name] != nil +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// IsSet indicates whether the specified flag was specified at all on the cmd line. +func IsSet(name string) bool { + return CommandLine.IsSet(name) +} + +type nArgRequirementType int + +// Indicator used to pass to BadArgs function +const ( + Exact nArgRequirementType = iota + Max + Min +) + +type nArgRequirement struct { + Type nArgRequirementType + N int +} + +// Require adds a requirement about the number of arguments for the FlagSet. +// The first parameter can be Exact, Max, or Min to respectively specify the exact, +// the maximum, or the minimal number of arguments required. +// The actual check is done in FlagSet.CheckArgs(). +func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { + fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) +} + +// CheckArgs uses the requirements set by FlagSet.Require() to validate +// the number of arguments. If the requirements are not met, +// an error message string is returned. +func (fs *FlagSet) CheckArgs() (message string) { + for _, req := range fs.nArgRequirements { + var arguments string + if req.N == 1 { + arguments = "1 argument" + } else { + arguments = fmt.Sprintf("%d arguments", req.N) + } + + str := func(kind string) string { + return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) + } + + switch req.Type { + case Exact: + if fs.NArg() != req.N { + return str("") + } + case Max: + if fs.NArg() > req.N { + return str("a maximum of ") + } + case Min: + if fs.NArg() < req.N { + return str("a minimum of ") + } + } + } + return "" +} + +// Set sets the value of the named flag. 
+func (fs *FlagSet) Set(name, value string) error { + flag, ok := fs.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + if err := flag.Value.Set(value); err != nil { + return err + } + if fs.actual == nil { + fs.actual = make(map[string]*Flag) + } + fs.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// isZeroValue guesses whether the string represents the zero +// value for a flag. It is not accurate but in practice works OK. +func isZeroValue(value string) bool { + switch value { + case "false": + return true + case "": + return true + case "0": + return true + } + return false +} + +// PrintDefaults prints, to standard error unless configured +// otherwise, the default values of all defined flags in the set. +func (fs *FlagSet) PrintDefaults() { + writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) + home := homedir.Get() + + // Don't substitute when HOME is / + if runtime.GOOS != "windows" && home == "/" { + home = "" + } + + // Add a blank line between cmd description and list of options + if fs.FlagCount() > 0 { + fmt.Fprintln(writer, "") + } + + fs.VisitAll(func(flag *Flag) { + names := []string{} + for _, name := range flag.Names { + if name[0] != '#' { + names = append(names, name) + } + } + if len(names) > 0 && len(flag.Usage) > 0 { + val := flag.DefValue + + if home != "" && strings.HasPrefix(val, home) { + val = homedir.GetShortcutString() + val[len(home):] + } + + if isZeroValue(val) { + format := " -%s" + fmt.Fprintf(writer, format, strings.Join(names, ", -")) + } else { + format := " -%s=%s" + fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) + } + for _, line := range strings.Split(flag.Usage, "\n") { + fmt.Fprintln(writer, "\t", line) + } + } + }) + writer.Flush() +} + +// PrintDefaults prints to standard error the default values of all defined command-line flags. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(fs *FlagSet) { + if fs.name == "" { + fmt.Fprintf(fs.Out(), "Usage:\n") + } else { + fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) + } + fs.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// The function is a variable that may be changed to point to a custom function. +var Usage = func() { + fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// ShortUsage prints to standard error a usage message documenting the standard command layout +// The function is a variable that may be changed to point to a custom function. +var ShortUsage = func() { + fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) +} + +// FlagCount returns the number of flags that have been defined. +func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } + +// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. +func (fs *FlagSet) FlagCountUndeprecated() int { + count := 0 + for _, flag := range sortFlags(fs.formal) { + for _, name := range flag.Names { + if name[0] != '#' { + count++ + break + } + } + } + return count +} + +// NFlag returns the number of flags that have been set. 
+func (fs *FlagSet) NFlag() int { return len(fs.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. +func (fs *FlagSet) Arg(i int) string { + if i < 0 || i >= len(fs.args) { + return "" + } + return fs.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (fs *FlagSet) NArg() int { return len(fs.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (fs *FlagSet) Args() []string { return fs.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { + fs.Var(newBoolValue(value, p), names, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, names []string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), names, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { + p := new(bool) + fs.BoolVar(p, names, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(names []string, value bool, usage string) *bool { + return CommandLine.Bool(names, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { + fs.Var(newIntValue(value, p), names, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, names []string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), names, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func (fs *FlagSet) Int(names []string, value int, usage string) *int { + p := new(int) + fs.IntVar(p, names, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(names []string, value int, usage string) *int { + return CommandLine.Int(names, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. 
+// The argument p points to an int64 variable in which to store the value of the flag. +func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { + fs.Var(newInt64Value(value, p), names, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, names []string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), names, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { + p := new(int64) + fs.Int64Var(p, names, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(names []string, value int64, usage string) *int64 { + return CommandLine.Int64(names, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { + fs.Var(newUintValue(value, p), names, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, names []string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), names, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { + p := new(uint) + fs.UintVar(p, names, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(names []string, value uint, usage string) *uint { + return CommandLine.Uint(names, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { + fs.Var(newUint64Value(value, p), names, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, names []string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), names, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { + p := new(uint64) + fs.Uint64Var(p, names, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. 
+func Uint64(names []string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(names, value, usage) +} + +// Uint16Var defines a uint16 flag with specified name, default value, and usage string. +// The argument p points to a uint16 variable in which to store the value of the flag. +func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) { + fs.Var(newUint16Value(value, p), names, usage) +} + +// Uint16Var defines a uint16 flag with specified name, default value, and usage string. +// The argument p points to a uint16 variable in which to store the value of the flag. +func Uint16Var(p *uint16, names []string, value uint16, usage string) { + CommandLine.Var(newUint16Value(value, p), names, usage) +} + +// Uint16 defines a uint16 flag with specified name, default value, and usage string. +// The return value is the address of a uint16 variable that stores the value of the flag. +func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 { + p := new(uint16) + fs.Uint16Var(p, names, value, usage) + return p +} + +// Uint16 defines a uint16 flag with specified name, default value, and usage string. +// The return value is the address of a uint16 variable that stores the value of the flag. +func Uint16(names []string, value uint16, usage string) *uint16 { + return CommandLine.Uint16(names, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { + fs.Var(newStringValue(value, p), names, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, names []string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), names, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (fs *FlagSet) String(names []string, value string, usage string) *string { + p := new(string) + fs.StringVar(p, names, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(names []string, value string, usage string) *string { + return CommandLine.String(names, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { + fs.Var(newFloat64Value(value, p), names, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, names []string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), names, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. 
+func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { + p := new(float64) + fs.Float64Var(p, names, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(names []string, value float64, usage string) *float64 { + return CommandLine.Float64(names, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + fs.Var(newDurationValue(value, p), names, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), names, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + fs.DurationVar(p, names, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +func Duration(names []string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(names, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (fs *FlagSet) Var(value Value, names []string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{names, usage, value, value.String()} + for _, name := range names { + name = strings.TrimPrefix(name, "#") + _, alreadythere := fs.formal[name] + if alreadythere { + var msg string + if fs.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) + } + fmt.Fprintln(fs.Out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if fs.formal == nil { + fs.formal = make(map[string]*Flag) + } + fs.formal[name] = flag + } +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. 
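As an aside on the Value interface described in the comment above: a minimal, hypothetical sketch of the comma-separated list flag it mentions (the listValue type and the -H/--host flag names below are invented for illustration and are not part of the vendored package) might look like this:

package main

import (
	"fmt"
	"strings"

	flag "github.com/docker/docker/pkg/mflag"
)

// listValue satisfies the Value interface: Set appends the comma-separated
// items and String joins them back for the usage/default text.
type listValue []string

func (l *listValue) String() string { return strings.Join(*l, ",") }

func (l *listValue) Set(s string) error {
	*l = append(*l, strings.Split(s, ",")...)
	return nil
}

func main() {
	// Hypothetical flag names; any []string of names works, as with the
	// other definition helpers in this package.
	var hosts listValue
	flag.Var(&hosts, []string{"H", "-host"}, "comma-separated list of hosts")
	flag.Parse()
	fmt.Println("hosts:", hosts)
}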
+func Var(value Value, names []string, usage string) { + CommandLine.Var(value, names, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (fs *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(fs.Out(), err) + if os.Args[0] == fs.name { + fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) + } else { + fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) + } + return err +} + +// usage calls the Usage method for the flag set, or the usage function if +// the flag set is CommandLine. +func (fs *FlagSet) usage() { + if fs == CommandLine { + Usage() + } else if fs.Usage == nil { + defaultUsage(fs) + } else { + fs.Usage() + } +} + +func trimQuotes(str string) string { + if len(str) == 0 { + return str + } + type quote struct { + start, end byte + } + + // All valid quote types. + quotes := []quote{ + // Double quotes + { + start: '"', + end: '"', + }, + + // Single quotes + { + start: '\'', + end: '\'', + }, + } + + for _, quote := range quotes { + // Only strip if outermost match. + if str[0] == quote.start && str[len(str)-1] == quote.end { + str = str[1 : len(str)-1] + break + } + } + + return str +} + +// parseOne parses one flag. It reports whether a flag was seen. +func (fs *FlagSet) parseOne() (bool, string, error) { + if len(fs.args) == 0 { + return false, "", nil + } + s := fs.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, "", nil + } + if s[1] == '-' && len(s) == 2 { // "--" terminates the flags + fs.args = fs.args[1:] + return false, "", nil + } + name := s[1:] + if len(name) == 0 || name[0] == '=' { + return false, "", fs.failf("bad flag syntax: %s", s) + } + + // it's a flag. does it have an argument? + fs.args = fs.args[1:] + hasValue := false + value := "" + if i := strings.Index(name, "="); i != -1 { + value = trimQuotes(name[i+1:]) + hasValue = true + name = name[:i] + } + + m := fs.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "-help" || name == "help" || name == "h" { // special case for nice help message. + fs.usage() + return false, "", ErrHelp + } + if len(name) > 0 && name[0] == '-' { + return false, "", fs.failf("flag provided but not defined: -%s", name) + } + return false, name, ErrRetry + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fv.Set(value); err != nil { + return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + fv.Set("true") + } + } else { + // It must have a value, which might be the next argument. + if !hasValue && len(fs.args) > 0 { + // value is the next arg + hasValue = true + value, fs.args = fs.args[0], fs.args[1:] + } + if !hasValue { + return false, "", fs.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if fs.actual == nil { + fs.actual = make(map[string]*Flag) + } + fs.actual[name] = flag + for i, n := range flag.Names { + if n == fmt.Sprintf("#%s", name) { + replacement := "" + for j := i; j < len(flag.Names); j++ { + if flag.Names[j][0] != '#' { + replacement = flag.Names[j] + break + } + } + if replacement != "" { + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. 
See usage.\n", name, replacement) + } else { + fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) + } + } + } + return true, "", nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help was set but not defined. +func (fs *FlagSet) Parse(arguments []string) error { + fs.parsed = true + fs.args = arguments + for { + seen, name, err := fs.parseOne() + if seen { + continue + } + if err == nil { + break + } + if err == ErrRetry { + if len(name) > 1 { + err = nil + for _, letter := range strings.Split(name, "") { + fs.args = append([]string{"-" + letter}, fs.args...) + seen2, _, err2 := fs.parseOne() + if seen2 { + continue + } + if err2 != nil { + err = fs.failf("flag provided but not defined: -%s", name) + break + } + } + if err == nil { + continue + } + } else { + err = fs.failf("flag provided but not defined: -%s", name) + } + } + switch fs.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(125) + case PanicOnError: + panic(err) + } + } + return nil +} + +// ParseFlags is a utility function that adds a help flag if withHelp is true, +// calls fs.Parse(args) and prints a relevant error message if there are +// incorrect number of arguments. It returns error only if error handling is +// set to ContinueOnError and parsing fails. If error handling is set to +// ExitOnError, it's safe to ignore the return value. +func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { + var help *bool + if withHelp { + help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") + } + if err := fs.Parse(args); err != nil { + return err + } + if help != nil && *help { + fs.SetOutput(os.Stdout) + fs.Usage() + os.Exit(0) + } + if str := fs.CheckArgs(); str != "" { + fs.SetOutput(os.Stderr) + fs.ReportError(str, withHelp) + fs.ShortUsage() + os.Exit(1) + } + return nil +} + +// ReportError is a utility method that prints a user-friendly message +// containing the error that occurred during parsing and a suggestion to get help +func (fs *FlagSet) ReportError(str string, withHelp bool) { + if withHelp { + if os.Args[0] == fs.Name() { + str += ".\nSee '" + os.Args[0] + " --help'" + } else { + str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" + } + } + fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str) +} + +// Parsed reports whether fs.Parse has been called. +func (fs *FlagSet) Parsed() bool { + return fs.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Parsed returns true if the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. 
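By way of illustration only (this sketch is not part of the vendored file; the "serve" subcommand name and its flags are invented), an independent flag set of the kind described above could be wired up roughly as follows:

package main

import (
	"fmt"
	"os"

	flag "github.com/docker/docker/pkg/mflag"
)

func main() {
	// An independent flag set, e.g. for a hypothetical "serve" subcommand.
	serveFlags := flag.NewFlagSet("serve", flag.ContinueOnError)
	addr := serveFlags.String([]string{"a", "-addr"}, ":8080", "listen address")
	verbose := serveFlags.Bool([]string{"v", "-verbose"}, false, "verbose logging")

	// Parse everything after the program name; with ContinueOnError the
	// parse error is returned instead of terminating the process.
	if err := serveFlags.Parse(os.Args[1:]); err != nil {
		os.Exit(2)
	}
	fmt.Printf("addr=%s verbose=%t args=%v\n", *addr, *verbose, serveFlags.Args())
}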
+func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name and the +// ContinueOnError error handling policy. +func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { + fs.name = name + fs.errorHandling = errorHandling +} + +type mergeVal struct { + Value + key string + fset *FlagSet +} + +func (v mergeVal) Set(s string) error { + return v.fset.Set(v.key, s) +} + +func (v mergeVal) IsBoolFlag() bool { + if b, ok := v.Value.(boolFlag); ok { + return b.IsBoolFlag() + } + return false +} + +// Name returns the name of a mergeVal. +// If the original value had a name, return the original name, +// otherwise, return the key assigned to this mergeVal. +func (v mergeVal) Name() string { + type namedValue interface { + Name() string + } + if nVal, ok := v.Value.(namedValue); ok { + return nVal.Name() + } + return v.key +} + +// Merge is a helper function that merges n FlagSets into a single dest FlagSet +// In case of name collision between the flagsets it will apply +// the destination FlagSet's errorHandling behavior. +func Merge(dest *FlagSet, flagsets ...*FlagSet) error { + for _, fset := range flagsets { + if fset.formal == nil { + continue + } + for k, f := range fset.formal { + if _, ok := dest.formal[k]; ok { + var err error + if fset.name == "" { + err = fmt.Errorf("flag redefined: %s", k) + } else { + err = fmt.Errorf("%s flag redefined: %s", fset.name, k) + } + fmt.Fprintln(fset.Out(), err.Error()) + // Happens only if flags are declared with identical names + switch dest.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + newF := *f + newF.Value = mergeVal{f.Value, k, fset} + if dest.formal == nil { + dest.formal = make(map[string]*Flag) + } + dest.formal[k] = &newF + } + } + return nil +} + +// IsEmpty reports if the FlagSet is actually empty. +func (fs *FlagSet) IsEmpty() bool { + return len(fs.actual) == 0 +} diff --git a/vendor/golang.org/x/tools/cover/LICENSE b/vendor/golang.org/x/tools/cover/LICENSE new file mode 100644 index 0000000000..6a66aea5ea --- /dev/null +++ b/vendor/golang.org/x/tools/cover/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/tools/cover/profile.go b/vendor/golang.org/x/tools/cover/profile.go new file mode 100644 index 0000000000..958881a2a2 --- /dev/null +++ b/vendor/golang.org/x/tools/cover/profile.go @@ -0,0 +1,190 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package cover provides support for parsing coverage profiles +// generated by "go test -coverprofile=cover.out". +package cover // import "golang.org/x/tools/cover" + +import ( + "bufio" + "fmt" + "math" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Profile represents the profiling data for a specific file. +type Profile struct { + FileName string + Mode string + Blocks []ProfileBlock +} + +// ProfileBlock represents a single block of profiling data. +type ProfileBlock struct { + StartLine, StartCol int + EndLine, EndCol int + NumStmt, Count int +} + +type byFileName []*Profile + +func (p byFileName) Len() int { return len(p) } +func (p byFileName) Less(i, j int) bool { return p[i].FileName < p[j].FileName } +func (p byFileName) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +// ParseProfiles parses profile data in the specified file and returns a +// Profile for each source file described therein. +func ParseProfiles(fileName string) ([]*Profile, error) { + pf, err := os.Open(fileName) + if err != nil { + return nil, err + } + defer pf.Close() + + files := make(map[string]*Profile) + buf := bufio.NewReader(pf) + // First line is "mode: foo", where foo is "set", "count", or "atomic". + // Rest of file is in the format + // encoding/base64/base64.go:34.44,37.40 3 1 + // where the fields are: name.go:line.column,line.column numberOfStatements count + s := bufio.NewScanner(buf) + mode := "" + for s.Scan() { + line := s.Text() + if mode == "" { + const p = "mode: " + if !strings.HasPrefix(line, p) || line == p { + return nil, fmt.Errorf("bad mode line: %v", line) + } + mode = line[len(p):] + continue + } + m := lineRe.FindStringSubmatch(line) + if m == nil { + return nil, fmt.Errorf("line %q doesn't match expected format: %v", line, lineRe) + } + fn := m[1] + p := files[fn] + if p == nil { + p = &Profile{ + FileName: fn, + Mode: mode, + } + files[fn] = p + } + p.Blocks = append(p.Blocks, ProfileBlock{ + StartLine: toInt(m[2]), + StartCol: toInt(m[3]), + EndLine: toInt(m[4]), + EndCol: toInt(m[5]), + NumStmt: toInt(m[6]), + Count: toInt(m[7]), + }) + } + if err := s.Err(); err != nil { + return nil, err + } + for _, p := range files { + sort.Sort(blocksByStart(p.Blocks)) + } + // Generate a sorted slice. 
+ profiles := make([]*Profile, 0, len(files)) + for _, profile := range files { + profiles = append(profiles, profile) + } + sort.Sort(byFileName(profiles)) + return profiles, nil +} + +type blocksByStart []ProfileBlock + +func (b blocksByStart) Len() int { return len(b) } +func (b blocksByStart) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b blocksByStart) Less(i, j int) bool { + bi, bj := b[i], b[j] + return bi.StartLine < bj.StartLine || bi.StartLine == bj.StartLine && bi.StartCol < bj.StartCol +} + +var lineRe = regexp.MustCompile(`^(.+):([0-9]+).([0-9]+),([0-9]+).([0-9]+) ([0-9]+) ([0-9]+)$`) + +func toInt(s string) int { + i, err := strconv.Atoi(s) + if err != nil { + panic(err) + } + return i +} + +// Boundary represents the position in a source file of the beginning or end of a +// block as reported by the coverage profile. In HTML mode, it will correspond to +// the opening or closing of a tag and will be used to colorize the source +type Boundary struct { + Offset int // Location as a byte offset in the source file. + Start bool // Is this the start of a block? + Count int // Event count from the cover profile. + Norm float64 // Count normalized to [0..1]. +} + +// Boundaries returns a Profile as a set of Boundary objects within the provided src. +func (p *Profile) Boundaries(src []byte) (boundaries []Boundary) { + // Find maximum count. + max := 0 + for _, b := range p.Blocks { + if b.Count > max { + max = b.Count + } + } + // Divisor for normalization. + divisor := math.Log(float64(max)) + + // boundary returns a Boundary, populating the Norm field with a normalized Count. + boundary := func(offset int, start bool, count int) Boundary { + b := Boundary{Offset: offset, Start: start, Count: count} + if !start || count == 0 { + return b + } + if max <= 1 { + b.Norm = 0.8 // Profile is in "set" mode; we want a heat map. Use cov8 in the CSS. + } else if count > 0 { + b.Norm = math.Log(float64(count)) / divisor + } + return b + } + + line, col := 1, 2 // TODO: Why is this 2? + for si, bi := 0, 0; si < len(src) && bi < len(p.Blocks); { + b := p.Blocks[bi] + if b.StartLine == line && b.StartCol == col { + boundaries = append(boundaries, boundary(si, true, b.Count)) + } + if b.EndLine == line && b.EndCol == col || line > b.EndLine { + boundaries = append(boundaries, boundary(si, false, 0)) + bi++ + continue // Don't advance through src; maybe the next block starts here.
+ } + if src[si] == '\n' { + line++ + col = 0 + } + col++ + si++ + } + sort.Sort(boundariesByPos(boundaries)) + return +} + +type boundariesByPos []Boundary + +func (b boundariesByPos) Len() int { return len(b) } +func (b boundariesByPos) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b boundariesByPos) Less(i, j int) bool { + if b[i].Offset == b[j].Offset { + return !b[i].Start && b[j].Start + } + return b[i].Offset < b[j].Offset +} diff --git a/vendor/manifest b/vendor/manifest index 0ca5f4ca41..f184d1c858 100644 --- a/vendor/manifest +++ b/vendor/manifest @@ -53,6 +53,14 @@ "branch": "master", "notests": true }, + { + "importpath": "github.com/armon/go-socks5", + "repository": "https://github.com/armon/go-socks5", + "vcs": "git", + "revision": "e75332964ef517daa070d7c38a9466a0d687e0a5", + "branch": "master", + "notests": true + }, { "importpath": "github.com/aws/aws-sdk-go", "repository": "https://github.com/aws/aws-sdk-go", @@ -82,6 +90,14 @@ "revision": "fb6c0b0e1ff03057a054886141927cdce6239dec", "branch": "master" }, + { + "importpath": "github.com/bmizerany/assert", + "repository": "https://github.com/bmizerany/assert", + "vcs": "git", + "revision": "b7ed37b82869576c289d7d97fb2bbd8b64a0cb28", + "branch": "master", + "notests": true + }, { "importpath": "github.com/bradfitz/gomemcache/memcache", "repository": "https://github.com/bradfitz/gomemcache", @@ -792,6 +808,15 @@ "revision": "f3b10ff408486b3e248197254514778285fbdea1", "branch": "master" }, + { + "importpath": "github.com/fluent/fluent-logger-golang/fluent", + "repository": "https://github.com/fluent/fluent-logger-golang", + "vcs": "git", + "revision": "28bdb662295c1b7aca09c1a4edbf9430010775a9", + "branch": "master", + "path": "/fluent", + "notests": true + }, { "importpath": "github.com/fsouza/go-dockerclient", "repository": "https://github.com/fsouza/go-dockerclient", @@ -961,6 +986,14 @@ "revision": "77ed1c8a01217656d2080ad51981f6e99adaa177", "branch": "master" }, + { + "importpath": "github.com/kr/pretty", + "repository": "https://github.com/kr/pretty", + "vcs": "git", + "revision": "cfb55aafdaf3ec08f0db22699ab822c50091b1c4", + "branch": "master", + "notests": true + }, { "importpath": "github.com/kr/pty", "repository": "https://github.com/kr/pty", @@ -968,6 +1001,14 @@ "revision": "f7ee69f31298ecbe5d2b349c711e2547a617d398", "branch": "master" }, + { + "importpath": "github.com/kr/text", + "repository": "https://github.com/kr/text", + "vcs": "git", + "revision": "7cafcd837844e784b526369c9bce262804aebc60", + "branch": "main", + "notests": true + }, { "importpath": "github.com/lsegal/gucumber", "repository": "https://github.com/lsegal/gucumber", @@ -975,6 +1016,30 @@ "revision": "e8116c9c66e641e9f81fc0a79fac923dfc646378", "branch": "master" }, + { + "importpath": "github.com/mattn/go-colorable", + "repository": "https://github.com/mattn/go-colorable", + "vcs": "git", + "revision": "d228849504861217f796da67fae4f6e347643f15", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/mattn/go-isatty", + "repository": "https://github.com/mattn/go-isatty", + "vcs": "git", + "revision": "30a891c33c7cde7b02a981314b4228ec99380cca", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/mattn/go-runewidth", + "repository": "https://github.com/mattn/go-runewidth", + "vcs": "git", + "revision": "737072b4e32b7a5018b4a7125da8d12de90e8045", + "branch": "master", + "notests": true + }, { "importpath": "github.com/matttproud/golang_protobuf_extensions/pbutil", "repository": 
"https://github.com/matttproud/golang_protobuf_extensions", @@ -983,6 +1048,14 @@ "branch": "master", "path": "/pbutil" }, + { + "importpath": "github.com/mgutz/ansi", + "repository": "https://github.com/mgutz/ansi", + "vcs": "git", + "revision": "c286dcecd19ff979eeb73ea444e479b903f2cfcb", + "branch": "master", + "notests": true + }, { "importpath": "github.com/miekg/dns", "repository": "https://github.com/miekg/dns", @@ -1076,6 +1149,14 @@ "revision": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3", "branch": "master" }, + { + "importpath": "github.com/olekukonko/tablewriter", + "repository": "https://github.com/olekukonko/tablewriter", + "vcs": "git", + "revision": "bdcc175572fd7abece6c831e643891b9331bc9e7", + "branch": "master", + "notests": true + }, { "importpath": "github.com/opencontainers/runc/libcontainer/cgroups", "repository": "https://github.com/opencontainers/runc", @@ -1138,6 +1219,14 @@ "revision": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655", "branch": "master" }, + { + "importpath": "github.com/philhofer/fwd", + "repository": "https://github.com/philhofer/fwd", + "vcs": "git", + "revision": "98c11a7a6ec829d672b03833c3d69a7fae1ca972", + "branch": "master", + "notests": true + }, { "importpath": "github.com/pmezard/go-difflib/difflib", "repository": "https://github.com/pmezard/go-difflib", @@ -1235,6 +1324,15 @@ "branch": "master", "path": "/assert" }, + { + "importpath": "github.com/tinylib/msgp/msgp", + "repository": "https://github.com/tinylib/msgp", + "vcs": "git", + "revision": "ad0ff2e232ad2e37faf67087fb24bf8d04a8ce20", + "branch": "master", + "path": "/msgp", + "notests": true + }, { "importpath": "github.com/tylerb/graceful", "repository": "https://github.com/tylerb/graceful", @@ -1259,6 +1357,23 @@ "branch": "master", "notests": true }, + { + "importpath": "github.com/weaveworks/common", + "repository": "https://github.com/weaveworks/common", + "vcs": "git", + "revision": "5edbff9f695b090e16bbb90dd89b83b70178c49e", + "branch": "master", + "notests": true + }, + { + "importpath": "github.com/weaveworks/docker/pkg/mflag", + "repository": "https://github.com/weaveworks/docker", + "vcs": "git", + "revision": "5761d8b342f0d6352f591968fbe20b4298c4de22", + "branch": "master", + "path": "/pkg/mflag", + "notests": true + }, { "importpath": "github.com/weaveworks/go-checkpoint", "repository": "https://github.com/weaveworks/go-checkpoint", @@ -1346,6 +1461,15 @@ "path": "/unix", "notests": true }, + { + "importpath": "golang.org/x/tools/cover", + "repository": "https://go.googlesource.com/tools", + "vcs": "git", + "revision": "ae1141fc8b3e38d9e074b383af516745815897c3", + "branch": "master", + "path": "/cover", + "notests": true + }, { "importpath": "golang.org/x/tools/go/ast/astutil", "repository": "https://go.googlesource.com/tools",