From 1de65708921cdd42f476c101df15573b614bbcfd Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 10 Sep 2018 14:58:31 -0700 Subject: [PATCH 1/6] vendor vault 0.11.1 api --- .../hashicorp/vault/api/auth_token.go | 57 +++- .../github.com/hashicorp/vault/api/client.go | 226 ++++++++++++--- vendor/github.com/hashicorp/vault/api/help.go | 6 +- .../github.com/hashicorp/vault/api/logical.go | 147 +++++++--- .../github.com/hashicorp/vault/api/renewer.go | 12 +- .../github.com/hashicorp/vault/api/request.go | 72 ++++- .../hashicorp/vault/api/response.go | 8 +- .../github.com/hashicorp/vault/api/secret.go | 104 +++++-- vendor/github.com/hashicorp/vault/api/ssh.go | 13 +- .../hashicorp/vault/api/ssh_agent.go | 51 +--- .../hashicorp/vault/api/sys_audit.go | 92 ++++--- .../hashicorp/vault/api/sys_auth.go | 102 ++++--- .../hashicorp/vault/api/sys_capabilities.go | 29 +- .../hashicorp/vault/api/sys_config_cors.go | 71 ++++- .../hashicorp/vault/api/sys_generate_root.go | 22 +- .../hashicorp/vault/api/sys_health.go | 8 +- .../hashicorp/vault/api/sys_init.go | 11 +- .../hashicorp/vault/api/sys_leader.go | 17 +- .../hashicorp/vault/api/sys_leases.go | 65 ++++- .../hashicorp/vault/api/sys_mounts.go | 134 ++++----- .../hashicorp/vault/api/sys_plugins.go | 28 +- .../hashicorp/vault/api/sys_policy.go | 60 ++-- .../hashicorp/vault/api/sys_rekey.go | 257 +++++++++++++++--- .../hashicorp/vault/api/sys_rotate.go | 59 +++- .../hashicorp/vault/api/sys_seal.go | 11 +- .../hashicorp/vault/api/sys_stepdown.go | 11 +- .../vault/helper/compressutil/compress.go | 7 +- .../hashicorp/vault/helper/jsonutil/json.go | 3 +- .../vault/helper/parseutil/parseutil.go | 45 ++- .../hashicorp/vault/helper/strutil/strutil.go | 68 +++-- vendor/vendor.json | 10 +- 31 files changed, 1334 insertions(+), 472 deletions(-) diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go index 4f74f61fe5f2..c66fba348a20 100644 --- 
a/vendor/github.com/hashicorp/vault/api/auth_token.go +++ b/vendor/github.com/hashicorp/vault/api/auth_token.go @@ -1,5 +1,7 @@ package api +import "context" + // TokenAuth is used to perform token backend operations on Vault type TokenAuth struct { c *Client @@ -16,7 +18,9 @@ func (c *TokenAuth) Create(opts *TokenCreateRequest) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -31,7 +35,9 @@ func (c *TokenAuth) CreateOrphan(opts *TokenCreateRequest) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -46,7 +52,9 @@ func (c *TokenAuth) CreateWithRole(opts *TokenCreateRequest, roleName string) (* return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -63,7 +71,9 @@ func (c *TokenAuth) Lookup(token string) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -79,7 +89,10 @@ func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { }); err != nil { return nil, err } - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -91,7 +104,9 @@ func (c *TokenAuth) LookupAccessor(accessor string) (*Secret, error) { func (c *TokenAuth) LookupSelf() (*Secret, error) { r := c.c.NewRequest("GET", 
"/v1/auth/token/lookup-self") - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -109,7 +124,9 @@ func (c *TokenAuth) Renew(token string, increment int) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -126,7 +143,9 @@ func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -146,7 +165,9 @@ func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, erro return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -164,7 +185,10 @@ func (c *TokenAuth) RevokeAccessor(accessor string) error { }); err != nil { return err } - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } @@ -183,7 +207,9 @@ func (c *TokenAuth) RevokeOrphan(token string) error { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } @@ -197,7 +223,10 @@ func (c *TokenAuth) RevokeOrphan(token string) error { // an effect. 
func (c *TokenAuth) RevokeSelf(token string) error { r := c.c.NewRequest("PUT", "/v1/auth/token/revoke-self") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } @@ -217,7 +246,9 @@ func (c *TokenAuth) RevokeTree(token string) error { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go index ff18b5b68b01..c7ced8237387 100644 --- a/vendor/github.com/hashicorp/vault/api/client.go +++ b/vendor/github.com/hashicorp/vault/api/client.go @@ -1,6 +1,7 @@ package api import ( + "context" "crypto/tls" "fmt" "net" @@ -16,10 +17,12 @@ import ( "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" + retryablehttp "github.com/hashicorp/go-retryablehttp" "github.com/hashicorp/go-rootcerts" + "github.com/hashicorp/vault/helper/consts" "github.com/hashicorp/vault/helper/parseutil" - "github.com/sethgrid/pester" "golang.org/x/net/http2" + "golang.org/x/time/rate" ) const EnvVaultAddress = "VAULT_ADDR" @@ -34,6 +37,7 @@ const EnvVaultWrapTTL = "VAULT_WRAP_TTL" const EnvVaultMaxRetries = "VAULT_MAX_RETRIES" const EnvVaultToken = "VAULT_TOKEN" const EnvVaultMFA = "VAULT_MFA" +const EnvRateLimit = "VAULT_RATE_LIMIT" // WrappingLookupFunc is a function that, given an HTTP verb and a path, // returns an optional string duration to be used for response wrapping (e.g. @@ -59,8 +63,9 @@ type Config struct { // (or http.DefaultClient). HttpClient *http.Client - // MaxRetries controls the maximum number of times to retry when a 5xx error - // occurs. Set to 0 or less to disable retrying. Defaults to 0. 
+ // MaxRetries controls the maximum number of times to retry when a 5xx + // error occurs. Set to 0 to disable retrying. Defaults to 2 (for a total + // of three tries). MaxRetries int // Timeout is for setting custom timeout parameter in the HttpClient @@ -69,6 +74,16 @@ type Config struct { // If there is an error when creating the configuration, this will be the // error Error error + + // The Backoff function to use; a default is used if not provided + Backoff retryablehttp.Backoff + + // Limiter is the rate limiter used by the client. + // If this pointer is nil, then there will be no limit set. + // In contrast, if this pointer is set, even to an empty struct, + // then that limiter will be used. Note that an empty Limiter + // is equivalent blocking all events. + Limiter *rate.Limiter } // TLSConfig contains the parameters needed to configure TLS on the HTTP client @@ -131,12 +146,15 @@ func DefaultConfig() *Config { // but in e.g. http_test actual redirect handling is necessary config.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error { // Returning this value causes the Go net library to not close the - // response body and to nil out the error. Otherwise pester tries - // three times on every redirect because it sees an error from this + // response body and to nil out the error. Otherwise retry clients may + // try three times on every redirect because it sees an error from this // function (to prevent redirects) passing through to it. 
return http.ErrUseLastResponse } + config.Backoff = retryablehttp.LinearJitterBackoff + config.MaxRetries = 2 + return config } @@ -160,7 +178,7 @@ func (c *Config) ConfigureTLS(t *TLSConfig) error { } foundClientCert = true case t.ClientCert != "" || t.ClientKey != "": - return fmt.Errorf("Both client cert and client key must be provided") + return fmt.Errorf("both client cert and client key must be provided") } if t.CACert != "" || t.CAPath != "" { @@ -205,6 +223,7 @@ func (c *Config) ReadEnvironment() error { var envInsecure bool var envTLSServerName string var envMaxRetries *uint64 + var limit *rate.Limiter // Parse the environment variables if v := os.Getenv(EnvVaultAddress); v != "" { @@ -229,10 +248,17 @@ func (c *Config) ReadEnvironment() error { if v := os.Getenv(EnvVaultClientKey); v != "" { envClientKey = v } + if v := os.Getenv(EnvRateLimit); v != "" { + rateLimit, burstLimit, err := parseRateLimit(v) + if err != nil { + return err + } + limit = rate.NewLimiter(rate.Limit(rateLimit), burstLimit) + } if t := os.Getenv(EnvVaultClientTimeout); t != "" { clientTimeout, err := parseutil.ParseDurationSecond(t) if err != nil { - return fmt.Errorf("Could not parse %s", EnvVaultClientTimeout) + return fmt.Errorf("could not parse %q", EnvVaultClientTimeout) } envClientTimeout = clientTimeout } @@ -240,7 +266,7 @@ func (c *Config) ReadEnvironment() error { var err error envInsecure, err = strconv.ParseBool(v) if err != nil { - return fmt.Errorf("Could not parse VAULT_SKIP_VERIFY") + return fmt.Errorf("could not parse VAULT_SKIP_VERIFY") } } if v := os.Getenv(EnvVaultTLSServerName); v != "" { @@ -260,6 +286,8 @@ func (c *Config) ReadEnvironment() error { c.modifyLock.Lock() defer c.modifyLock.Unlock() + c.Limiter = limit + if err := c.ConfigureTLS(t); err != nil { return err } @@ -269,7 +297,7 @@ func (c *Config) ReadEnvironment() error { } if envMaxRetries != nil { - c.MaxRetries = int(*envMaxRetries) + 1 + c.MaxRetries = int(*envMaxRetries) } if envClientTimeout 
!= 0 { @@ -279,6 +307,21 @@ func (c *Config) ReadEnvironment() error { return nil } +func parseRateLimit(val string) (rate float64, burst int, err error) { + + _, err = fmt.Sscanf(val, "%f:%d", &rate, &burst) + if err != nil { + rate, err = strconv.ParseFloat(val, 64) + if err != nil { + err = fmt.Errorf("%v was provided but incorrectly formatted", EnvRateLimit) + } + burst = int(rate) + } + + return rate, burst, err + +} + // Client is the client to the Vault API. Create a client with NewClient. type Client struct { modifyLock sync.RWMutex @@ -346,11 +389,12 @@ func (c *Client) SetAddress(addr string) error { c.modifyLock.Lock() defer c.modifyLock.Unlock() - var err error - if c.addr, err = url.Parse(addr); err != nil { - return fmt.Errorf("failed to set address: %v", err) + parsedAddr, err := url.Parse(addr) + if err != nil { + return errwrap.Wrapf("failed to set address: {{err}}", err) } + c.addr = parsedAddr return nil } @@ -362,6 +406,18 @@ func (c *Client) Address() string { return c.addr.String() } +// SetLimiter will set the rate limiter for this client. +// This method is thread-safe. 
+// rateLimit and burst are specified according to https://godoc.org/golang.org/x/time/rate#NewLimiter +func (c *Client) SetLimiter(rateLimit float64, burst int) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.Limiter = rate.NewLimiter(rate.Limit(rateLimit), burst) +} + // SetMaxRetries sets the number of retries that will be used in the case of certain errors func (c *Client) SetMaxRetries(retries int) { c.modifyLock.RLock() @@ -382,6 +438,15 @@ func (c *Client) SetClientTimeout(timeout time.Duration) { c.config.Timeout = timeout } +// CurrentWrappingLookupFunc sets a lookup function that returns desired wrap TTLs +// for a given operation and path +func (c *Client) CurrentWrappingLookupFunc() WrappingLookupFunc { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + return c.wrappingLookupFunc +} + // SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs // for a given operation and path func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) { @@ -400,6 +465,19 @@ func (c *Client) SetMFACreds(creds []string) { c.mfaCreds = creds } +// SetNamespace sets the namespace supplied either via the environment +// variable or via the command line. +func (c *Client) SetNamespace(namespace string) { + c.modifyLock.Lock() + defer c.modifyLock.Unlock() + + if c.headers == nil { + c.headers = make(http.Header) + } + + c.headers.Set(consts.NamespaceHeaderName, namespace) +} + // Token returns the access token being used by this client. It will // return the empty string if there is no token set. func (c *Client) Token() string { @@ -426,6 +504,26 @@ func (c *Client) ClearToken() { c.token = "" } +// Headers gets the current set of headers used for requests. This returns a +// copy; to modify it make modifications locally and use SetHeaders. 
+func (c *Client) Headers() http.Header { + c.modifyLock.RLock() + defer c.modifyLock.RUnlock() + + if c.headers == nil { + return nil + } + + ret := make(http.Header) + for k, v := range c.headers { + for _, val := range v { + ret[k] = append(ret[k], val) + } + } + + return ret +} + // SetHeaders sets the headers to be used for future requests. func (c *Client) SetHeaders(headers http.Header) { c.modifyLock.Lock() @@ -434,6 +532,16 @@ func (c *Client) SetHeaders(headers http.Header) { c.headers = headers } +// SetBackoff sets the backoff function to be used for future requests. +func (c *Client) SetBackoff(backoff retryablehttp.Backoff) { + c.modifyLock.RLock() + c.config.modifyLock.Lock() + defer c.config.modifyLock.Unlock() + c.modifyLock.RUnlock() + + c.config.Backoff = backoff +} + // Clone creates a new client with the same configuration. Note that the same // underlying http.Client is used; modifying the client from more than one // goroutine at once may not be safe, so modify the client as needed and then @@ -449,6 +557,8 @@ func (c *Client) Clone() (*Client, error) { HttpClient: config.HttpClient, MaxRetries: config.MaxRetries, Timeout: config.Timeout, + Backoff: config.Backoff, + Limiter: config.Limiter, } config.modifyLock.RUnlock() @@ -470,14 +580,20 @@ func (c *Client) SetPolicyOverride(override bool) { // doesn't need to be called externally. 
func (c *Client) NewRequest(method, requestPath string) *Request { c.modifyLock.RLock() - defer c.modifyLock.RUnlock() + addr := c.addr + token := c.token + mfaCreds := c.mfaCreds + wrappingLookupFunc := c.wrappingLookupFunc + headers := c.headers + policyOverride := c.policyOverride + c.modifyLock.RUnlock() // if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), lookup the SRV // record and take the highest match; this is not designed for high-availability, just discovery - var host string = c.addr.Host - if c.addr.Port() == "" { + var host string = addr.Host + if addr.Port() == "" { // Internet Draft specifies that the SRV record is ignored if a port is given - _, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname()) + _, addrs, err := net.LookupSRV("http", "tcp", addr.Hostname()) if err == nil && len(addrs) > 0 { host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port) } @@ -486,12 +602,12 @@ func (c *Client) NewRequest(method, requestPath string) *Request { req := &Request{ Method: method, URL: &url.URL{ - User: c.addr.User, - Scheme: c.addr.Scheme, + User: addr.User, + Scheme: addr.Scheme, Host: host, - Path: path.Join(c.addr.Path, requestPath), + Path: path.Join(addr.Path, requestPath), }, - ClientToken: c.token, + ClientToken: token, Params: make(map[string][]string), } @@ -505,21 +621,19 @@ func (c *Client) NewRequest(method, requestPath string) *Request { lookupPath = requestPath } - req.MFAHeaderVals = c.mfaCreds + req.MFAHeaderVals = mfaCreds - if c.wrappingLookupFunc != nil { - req.WrapTTL = c.wrappingLookupFunc(method, lookupPath) + if wrappingLookupFunc != nil { + req.WrapTTL = wrappingLookupFunc(method, lookupPath) } else { req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath) } - if c.config.Timeout != 0 { - c.config.HttpClient.Timeout = c.config.Timeout - } - if c.headers != nil { - req.Headers = c.headers + + if headers != nil { + req.Headers = headers } - req.PolicyOverride = c.policyOverride + 
req.PolicyOverride = policyOverride return req } @@ -528,30 +642,66 @@ func (c *Client) NewRequest(method, requestPath string) *Request { // a Vault server not configured with this client. This is an advanced operation // that generally won't need to be called externally. func (c *Client) RawRequest(r *Request) (*Response, error) { + return c.RawRequestWithContext(context.Background(), r) +} + +// RawRequestWithContext performs the raw request given. This request may be against +// a Vault server not configured with this client. This is an advanced operation +// that generally won't need to be called externally. +func (c *Client) RawRequestWithContext(ctx context.Context, r *Request) (*Response, error) { c.modifyLock.RLock() - c.config.modifyLock.RLock() - defer c.config.modifyLock.RUnlock() token := c.token + + c.config.modifyLock.RLock() + limiter := c.config.Limiter + maxRetries := c.config.MaxRetries + backoff := c.config.Backoff + httpClient := c.config.HttpClient + timeout := c.config.Timeout + c.config.modifyLock.RUnlock() + c.modifyLock.RUnlock() + if limiter != nil { + limiter.Wait(ctx) + } + // Sanity check the token before potentially erroring from the API idx := strings.IndexFunc(token, func(c rune) bool { return !unicode.IsPrint(c) }) if idx != -1 { - return nil, fmt.Errorf("Configured Vault token contains non-printable characters and cannot be used.") + return nil, fmt.Errorf("configured Vault token contains non-printable characters and cannot be used") } redirectCount := 0 START: - req, err := r.ToHTTP() + req, err := r.toRetryableHTTP() if err != nil { return nil, err } + if req == nil { + return nil, fmt.Errorf("nil request created") + } - client := pester.NewExtendedClient(c.config.HttpClient) - client.Backoff = pester.LinearJitterBackoff - client.MaxRetries = c.config.MaxRetries + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + req.Request = req.Request.WithContext(ctx) + + if backoff == nil { + backoff = 
retryablehttp.LinearJitterBackoff + } + + client := &retryablehttp.Client{ + HTTPClient: httpClient, + RetryWaitMin: 1000 * time.Millisecond, + RetryWaitMax: 1500 * time.Millisecond, + RetryMax: maxRetries, + CheckRetry: retryablehttp.DefaultRetryPolicy, + Backoff: backoff, + ErrorHandler: retryablehttp.PassthroughErrorHandler, + } var result *Response resp, err := client.Do(req) @@ -560,8 +710,8 @@ START: } if err != nil { if strings.Contains(err.Error(), "tls: oversized") { - err = fmt.Errorf( - "%s\n\n"+ + err = errwrap.Wrapf( + "{{err}}\n\n"+ "This error usually means that the server is running with TLS disabled\n"+ "but the client is configured to use TLS. Please either enable TLS\n"+ "on the server or run the client with -address set to an address\n"+ diff --git a/vendor/github.com/hashicorp/vault/api/help.go b/vendor/github.com/hashicorp/vault/api/help.go index b9ae100bc524..472ca0395ead 100644 --- a/vendor/github.com/hashicorp/vault/api/help.go +++ b/vendor/github.com/hashicorp/vault/api/help.go @@ -1,6 +1,7 @@ package api import ( + "context" "fmt" ) @@ -8,7 +9,10 @@ import ( func (c *Client) Help(path string) (*Help, error) { r := c.NewRequest("GET", fmt.Sprintf("/v1/%s", path)) r.Params.Add("help", "1") - resp, err := c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/logical.go b/vendor/github.com/hashicorp/vault/api/logical.go index a492b5ab9230..d13daac6e97f 100644 --- a/vendor/github.com/hashicorp/vault/api/logical.go +++ b/vendor/github.com/hashicorp/vault/api/logical.go @@ -2,10 +2,13 @@ package api import ( "bytes" + "context" "fmt" - "net/http" + "io" + "net/url" "os" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/jsonutil" ) @@ -44,12 +47,44 @@ func (c *Client) Logical() *Logical { } func (c *Logical) Read(path string) (*Secret, error) { + return 
c.ReadWithData(path, nil) +} + +func (c *Logical) ReadWithData(path string, data map[string][]string) (*Secret, error) { r := c.c.NewRequest("GET", "/v1/"+path) - resp, err := c.c.RawRequest(r) + + var values url.Values + for k, v := range data { + if values == nil { + values = make(url.Values) + } + for _, val := range v { + values.Add(k, val) + } + } + + if values != nil { + r.Params = values + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } return nil, nil } if err != nil { @@ -65,11 +100,25 @@ func (c *Logical) List(path string) (*Secret, error) { // handle the wrapping lookup function r.Method = "GET" r.Params.Set("list", "true") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil + } return nil, nil } if err != nil { @@ -85,36 +134,59 @@ func (c *Logical) Write(path string, data map[string]interface{}) (*Secret, erro return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := 
ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } if err != nil { return nil, err } - if resp.StatusCode == 200 { - return ParseSecret(resp.Body) - } - - return nil, nil + return ParseSecret(resp.Body) } func (c *Logical) Delete(path string) (*Secret, error) { r := c.c.NewRequest("DELETE", "/v1/"+path) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } + if resp != nil && resp.StatusCode == 404 { + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: + return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, err + } + } if err != nil { return nil, err } - if resp.StatusCode == 200 { - return ParseSecret(resp.Body) - } - - return nil, nil + return ParseSecret(resp.Body) } func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { @@ -134,40 +206,49 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() } - - // Return all errors except those that are from a 404 as we handle the not - // found error as a special case. 
- if err != nil && (resp == nil || resp.StatusCode != 404) { - return nil, err - } - if resp == nil { - return nil, nil + if resp == nil || resp.StatusCode != 404 { + if err != nil { + return nil, err + } + if resp == nil { + return nil, nil + } + return ParseSecret(resp.Body) } - switch resp.StatusCode { - case http.StatusOK: // New method is supported - return ParseSecret(resp.Body) - case http.StatusNotFound: // Fall back to old method - default: + // In the 404 case this may actually be a wrapped 404 error + secret, parseErr := ParseSecret(resp.Body) + switch parseErr { + case nil: + case io.EOF: return nil, nil + default: + return nil, err + } + if secret != nil && (len(secret.Warnings) > 0 || len(secret.Data) > 0) { + return secret, nil } + // Otherwise this might be an old-style wrapping token so attempt the old + // method if wrappingToken != "" { origToken := c.c.Token() defer c.c.SetToken(origToken) c.c.SetToken(wrappingToken) } - secret, err := c.Read(wrappedResponseLocation) + secret, err = c.Read(wrappedResponseLocation) if err != nil { - return nil, fmt.Errorf("error reading %s: %s", wrappedResponseLocation, err) + return nil, errwrap.Wrapf(fmt.Sprintf("error reading %q: {{err}}", wrappedResponseLocation), err) } if secret == nil { - return nil, fmt.Errorf("no value found at %s", wrappedResponseLocation) + return nil, fmt.Errorf("no value found at %q", wrappedResponseLocation) } if secret.Data == nil { return nil, fmt.Errorf("\"data\" not found in wrapping response") @@ -179,7 +260,7 @@ func (c *Logical) Unwrap(wrappingToken string) (*Secret, error) { wrappedSecret := new(Secret) buf := bytes.NewBufferString(secret.Data["response"].(string)) if err := jsonutil.DecodeJSONFromReader(buf, wrappedSecret); err != nil { - return nil, fmt.Errorf("error unmarshalling wrapped secret: %s", err) + return nil, errwrap.Wrapf("error unmarshalling wrapped secret: {{err}}", err) } return wrappedSecret, nil diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go 
b/vendor/github.com/hashicorp/vault/api/renewer.go index 7fd1de7db20d..1d37a1938212 100644 --- a/vendor/github.com/hashicorp/vault/api/renewer.go +++ b/vendor/github.com/hashicorp/vault/api/renewer.go @@ -64,9 +64,7 @@ type RenewerInput struct { // Secret is the secret to renew Secret *Secret - // Grace is a minimum renewal before returning so the upstream client - // can do a re-read. This can be used to prevent clients from waiting - // too long to read a new credential and incur downtime. + // DEPRECATED: this does not do anything. Grace time.Duration // Rand is the randomizer to use for underlying randomization. If not @@ -107,8 +105,6 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) { return nil, ErrRenewerMissingSecret } - grace := i.Grace - random := i.Rand if random == nil { random = rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) @@ -122,7 +118,6 @@ func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) { return &Renewer{ client: c, secret: secret, - grace: grace, increment: i.Increment, random: random, doneCh: make(chan error, 1), @@ -166,10 +161,7 @@ func (r *Renewer) Renew() { result = r.renewLease() } - select { - case r.doneCh <- result: - case <-r.stopCh: - } + r.doneCh <- result } // renewAuth is a helper for renewing authentication. 
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go index a5d8e75a63a9..5bcff8c6c510 100644 --- a/vendor/github.com/hashicorp/vault/api/request.go +++ b/vendor/github.com/hashicorp/vault/api/request.go @@ -4,8 +4,11 @@ import ( "bytes" "encoding/json" "io" + "io/ioutil" "net/http" "net/url" + + retryablehttp "github.com/hashicorp/go-retryablehttp" ) // Request is a raw request configuration structure used to initiate @@ -19,8 +22,14 @@ type Request struct { MFAHeaderVals []string WrapTTL string Obj interface{} - Body io.Reader - BodySize int64 + + // When possible, use BodyBytes as it is more efficient due to how the + // retry logic works + BodyBytes []byte + + // Fallback + Body io.Reader + BodySize int64 // Whether to request overriding soft-mandatory Sentinel policies (RGPs and // EGPs). If set, the override flag will take effect for all policies @@ -30,34 +39,73 @@ type Request struct { // SetJSONBody is used to set a request body that is a JSON-encoded value. func (r *Request) SetJSONBody(val interface{}) error { - buf := bytes.NewBuffer(nil) - enc := json.NewEncoder(buf) - if err := enc.Encode(val); err != nil { + buf, err := json.Marshal(val) + if err != nil { return err } r.Obj = val - r.Body = buf - r.BodySize = int64(buf.Len()) + r.BodyBytes = buf return nil } // ResetJSONBody is used to reset the body for a redirect func (r *Request) ResetJSONBody() error { - if r.Body == nil { + if r.BodyBytes == nil { return nil } return r.SetJSONBody(r.Obj) } -// ToHTTP turns this request into a valid *http.Request for use with the -// net/http package. +// DEPRECATED: ToHTTP turns this request into a valid *http.Request for use +// with the net/http package. 
func (r *Request) ToHTTP() (*http.Request, error) { + req, err := r.toRetryableHTTP() + if err != nil { + return nil, err + } + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + req.Request.Body = ioutil.NopCloser(bytes.NewReader(r.BodyBytes)) + + default: + if c, ok := r.Body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(r.Body) + } + } + + return req.Request, nil +} + +func (r *Request) toRetryableHTTP() (*retryablehttp.Request, error) { // Encode the query parameters r.URL.RawQuery = r.Params.Encode() - // Create the HTTP request - req, err := http.NewRequest(r.Method, r.URL.RequestURI(), r.Body) + // Create the HTTP request, defaulting to retryable + var req *retryablehttp.Request + + var err error + var body interface{} + + switch { + case r.BodyBytes == nil && r.Body == nil: + // No body + + case r.BodyBytes != nil: + // Use bytes, it's more efficient + body = r.BodyBytes + + default: + body = r.Body + } + + req, err = retryablehttp.NewRequest(r.Method, r.URL.RequestURI(), body) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go index 05502e1b0f62..053a2772386a 100644 --- a/vendor/github.com/hashicorp/vault/api/response.go +++ b/vendor/github.com/hashicorp/vault/api/response.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "io" + "io/ioutil" "net/http" "github.com/hashicorp/vault/helper/jsonutil" @@ -33,11 +34,14 @@ func (r *Response) Error() error { // We have an error. Let's copy the body into our own buffer first, // so that if we can't decode JSON, we can at least copy it raw. - var bodyBuf bytes.Buffer - if _, err := io.Copy(&bodyBuf, r.Body); err != nil { + bodyBuf := &bytes.Buffer{} + if _, err := io.Copy(bodyBuf, r.Body); err != nil { return err } + r.Body.Close() + r.Body = ioutil.NopCloser(bodyBuf) + // Decode the error response if we can. 
Note that we wrap the bodyBuf // in a bytes.Reader here so that the JSON decoder doesn't move the // read pointer for the original buffer. diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go index 4891651622ea..e25962604b4e 100644 --- a/vendor/github.com/hashicorp/vault/api/secret.go +++ b/vendor/github.com/hashicorp/vault/api/secret.go @@ -1,10 +1,12 @@ package api import ( + "bytes" "fmt" "io" "time" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/jsonutil" "github.com/hashicorp/vault/helper/parseutil" ) @@ -100,7 +102,8 @@ func (s *Secret) TokenRemainingUses() (int, error) { } // TokenPolicies returns the standardized list of policies for the given secret. -// If the secret is nil or does not contain any policies, this returns nil. +// If the secret is nil or does not contain any policies, this returns nil. It +// also populates the secret's Auth info with identity/token policy info. func (s *Secret) TokenPolicies() ([]string, error) { if s == nil { return nil, nil @@ -114,25 +117,75 @@ func (s *Secret) TokenPolicies() ([]string, error) { return nil, nil } - sList, ok := s.Data["policies"].([]string) - if ok { - return sList, nil - } + var tokenPolicies []string - list, ok := s.Data["policies"].([]interface{}) - if !ok { - return nil, fmt.Errorf("unable to convert token policies to expected format") + // Token policies + { + _, ok := s.Data["policies"] + if !ok { + goto TOKEN_DONE + } + + sList, ok := s.Data["policies"].([]string) + if ok { + tokenPolicies = sList + goto TOKEN_DONE + } + + list, ok := s.Data["policies"].([]interface{}) + if !ok { + return nil, fmt.Errorf("unable to convert token policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + tokenPolicies = append(tokenPolicies, p) + } } - policies := make([]string, len(list)) - for i := range list { - p, ok := 
list[i].(string) +TOKEN_DONE: + var identityPolicies []string + + // Identity policies + { + _, ok := s.Data["identity_policies"] + if !ok { + goto DONE + } + + sList, ok := s.Data["identity_policies"].([]string) + if ok { + identityPolicies = sList + goto DONE + } + + list, ok := s.Data["identity_policies"].([]interface{}) if !ok { - return nil, fmt.Errorf("unable to convert policy %v to string", list[i]) + return nil, fmt.Errorf("unable to convert identity policies to expected format") + } + for _, v := range list { + p, ok := v.(string) + if !ok { + return nil, fmt.Errorf("unable to convert policy %v to string", v) + } + identityPolicies = append(identityPolicies, p) } - policies[i] = p } +DONE: + + if s.Auth == nil { + s.Auth = &SecretAuth{} + } + + policies := append(tokenPolicies, identityPolicies...) + + s.Auth.TokenPolicies = tokenPolicies + s.Auth.IdentityPolicies = identityPolicies + s.Auth.Policies = policies + return policies, nil } @@ -190,7 +243,7 @@ func (s *Secret) TokenIsRenewable() (bool, error) { renewable, err := parseutil.ParseBool(s.Data["renewable"]) if err != nil { - return false, fmt.Errorf("could not convert renewable value to a boolean: %v", err) + return false, errwrap.Wrapf("could not convert renewable value to a boolean: {{err}}", err) } return renewable, nil @@ -233,10 +286,12 @@ type SecretWrapInfo struct { // SecretAuth is the structure containing auth information if we have it. 
type SecretAuth struct { - ClientToken string `json:"client_token"` - Accessor string `json:"accessor"` - Policies []string `json:"policies"` - Metadata map[string]string `json:"metadata"` + ClientToken string `json:"client_token"` + Accessor string `json:"accessor"` + Policies []string `json:"policies"` + TokenPolicies []string `json:"token_policies"` + IdentityPolicies []string `json:"identity_policies"` + Metadata map[string]string `json:"metadata"` LeaseDuration int `json:"lease_duration"` Renewable bool `json:"renewable"` @@ -244,9 +299,20 @@ type SecretAuth struct { // ParseSecret is used to parse a secret value from JSON from an io.Reader. func ParseSecret(r io.Reader) (*Secret, error) { + // First read the data into a buffer. Not super efficient but we want to + // know if we actually have a body or not. + var buf bytes.Buffer + _, err := buf.ReadFrom(r) + if err != nil { + return nil, err + } + if buf.Len() == 0 { + return nil, nil + } + // First decode the JSON into a map[string]interface{} var secret Secret - if err := jsonutil.DecodeJSONFromReader(r, &secret); err != nil { + if err := jsonutil.DecodeJSONFromReader(&buf, &secret); err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go index a17b0eb230a9..837eac4ff78d 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh.go +++ b/vendor/github.com/hashicorp/vault/api/ssh.go @@ -1,6 +1,9 @@ package api -import "fmt" +import ( + "context" + "fmt" +) // SSH is used to return a client to invoke operations on SSH backend. 
type SSH struct { @@ -28,7 +31,9 @@ func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, err return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -45,7 +50,9 @@ func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/ssh_agent.go b/vendor/github.com/hashicorp/vault/api/ssh_agent.go index 729fd99c43f6..1dd681a5d490 100644 --- a/vendor/github.com/hashicorp/vault/api/ssh_agent.go +++ b/vendor/github.com/hashicorp/vault/api/ssh_agent.go @@ -1,17 +1,20 @@ package api import ( + "context" "crypto/tls" "crypto/x509" "fmt" "io/ioutil" "os" + "github.com/hashicorp/errwrap" "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-rootcerts" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/vault/helper/hclutil" "github.com/mitchellh/mapstructure" ) @@ -41,16 +44,16 @@ type SSHHelper struct { type SSHVerifyResponse struct { // Usually empty. If the request OTP is echo request message, this will // be set to the corresponding echo response message. 
- Message string `json:"message" structs:"message" mapstructure:"message"` + Message string `json:"message" mapstructure:"message"` // Username associated with the OTP - Username string `json:"username" structs:"username" mapstructure:"username"` + Username string `json:"username" mapstructure:"username"` // IP associated with the OTP - IP string `json:"ip" structs:"ip" mapstructure:"ip"` + IP string `json:"ip" mapstructure:"ip"` // Name of the role against which the OTP was issued - RoleName string `json:"role_name" structs:"role_name" mapstructure:"role_name"` + RoleName string `json:"role_name" mapstructure:"role_name"` } // SSHHelperConfig is a structure which represents the entries from the vault-ssh-helper's configuration file. @@ -141,12 +144,12 @@ func LoadSSHHelperConfig(path string) (*SSHHelperConfig, error) { func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { root, err := hcl.Parse(string(contents)) if err != nil { - return nil, fmt.Errorf("ssh_helper: error parsing config: %s", err) + return nil, errwrap.Wrapf("error parsing config: {{err}}", err) } list, ok := root.Node.(*ast.ObjectList) if !ok { - return nil, fmt.Errorf("ssh_helper: error parsing config: file doesn't contain a root object") + return nil, fmt.Errorf("error parsing config: file doesn't contain a root object") } valid := []string{ @@ -159,7 +162,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { "tls_skip_verify", "tls_server_name", } - if err := checkHCLKeys(list, valid); err != nil { + if err := hclutil.CheckHCLKeys(list, valid); err != nil { return nil, multierror.Prefix(err, "ssh_helper:") } @@ -170,7 +173,7 @@ func ParseSSHHelperConfig(contents string) (*SSHHelperConfig, error) { } if c.VaultAddr == "" { - return nil, fmt.Errorf("ssh_helper: missing config 'vault_addr'") + return nil, fmt.Errorf(`missing config "vault_addr"`) } return &c, nil } @@ -205,7 +208,9 @@ func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { return 
nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -227,31 +232,3 @@ func (c *SSHHelper) Verify(otp string) (*SSHVerifyResponse, error) { } return &verifyResp, nil } - -func checkHCLKeys(node ast.Node, valid []string) error { - var list *ast.ObjectList - switch n := node.(type) { - case *ast.ObjectList: - list = n - case *ast.ObjectType: - list = n.List - default: - return fmt.Errorf("cannot check HCL keys of type %T", n) - } - - validMap := make(map[string]struct{}, len(valid)) - for _, v := range valid { - validMap[v] = struct{}{} - } - - var result error - for _, item := range list.Items { - key := item.Keys[0].Token.Value().(string) - if _, ok := validMap[key]; !ok { - result = multierror.Append(result, fmt.Errorf( - "invalid key '%s' on line %d", key, item.Assign.Line)) - } - } - - return result -} diff --git a/vendor/github.com/hashicorp/vault/api/sys_audit.go b/vendor/github.com/hashicorp/vault/api/sys_audit.go index 89f2141664af..2448c0367cbd 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_audit.go +++ b/vendor/github.com/hashicorp/vault/api/sys_audit.go @@ -1,9 +1,10 @@ package api import ( + "context" + "errors" "fmt" - "github.com/fatih/structs" "github.com/mitchellh/mapstructure" ) @@ -17,56 +18,58 @@ func (c *Sys) AuditHash(path string, input string) (string, error) { return "", err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return "", err } defer resp.Body.Close() - type d struct { - Hash string `json:"hash"` - } - - var result d - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return "", err } + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") + } + + 
hash, ok := secret.Data["hash"] + if !ok { + return "", errors.New("hash not found in response data") + } + hashStr, ok := hash.(string) + if !ok { + return "", errors.New("could not parse hash in response data") + } - return result.Hash, err + return hashStr, nil } func (c *Sys) ListAudit() (map[string]*Audit, error) { r := c.c.NewRequest("GET", "/v1/sys/audit") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { return nil, err } defer resp.Body.Close() - var result map[string]interface{} - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } mounts := map[string]*Audit{} - for k, v := range result { - switch v.(type) { - case map[string]interface{}: - default: - continue - } - var res Audit - err = mapstructure.Decode(v, &res) - if err != nil { - return nil, err - } - // Not a mount, some other api.Secret data - if res.Type == "" { - continue - } - mounts[k] = &res + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err } return mounts, nil @@ -83,14 +86,15 @@ func (c *Sys) EnableAudit( } func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) error { - body := structs.Map(options) - r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/sys/audit/%s", path)) - if err := r.SetJSONBody(body); err != nil { + if err := r.SetJSONBody(options); err != nil { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { return err } @@ -101,7 +105,11 @@ func (c *Sys) EnableAuditWithOptions(path string, options *EnableAuditOptions) e func (c *Sys) DisableAudit(path string) error { r := 
c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/audit/%s", path)) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { defer resp.Body.Close() } @@ -113,16 +121,16 @@ func (c *Sys) DisableAudit(path string) error { // documentation. Please refer to that documentation for more details. type EnableAuditOptions struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Options map[string]string `json:"options" structs:"options"` - Local bool `json:"local" structs:"local"` + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` } type Audit struct { - Path string - Type string - Description string - Options map[string]string - Local bool + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Options map[string]string `json:"options" mapstructure:"options"` + Local bool `json:"local" mapstructure:"local"` + Path string `json:"path" mapstructure:"path"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go index 6be90989d348..447c5d54b765 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_auth.go +++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go @@ -1,43 +1,36 @@ package api import ( + "context" + "errors" "fmt" - "github.com/fatih/structs" "github.com/mitchellh/mapstructure" ) func (c *Sys) ListAuth() (map[string]*AuthMount, error) { r := c.c.NewRequest("GET", "/v1/sys/auth") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } 
defer resp.Body.Close() - var result map[string]interface{} - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } mounts := map[string]*AuthMount{} - for k, v := range result { - switch v.(type) { - case map[string]interface{}: - default: - continue - } - var res AuthMount - err = mapstructure.Decode(v, &res) - if err != nil { - return nil, err - } - // Not a mount, some other api.Secret data - if res.Type == "" { - continue - } - mounts[k] = &res + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err } return mounts, nil @@ -52,14 +45,14 @@ func (c *Sys) EnableAuth(path, authType, desc string) error { } func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) error { - body := structs.Map(options) - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/auth/%s", path)) - if err := r.SetJSONBody(body); err != nil { + if err := r.SetJSONBody(options); err != nil { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } @@ -70,7 +63,10 @@ func (c *Sys) EnableAuthWithOptions(path string, options *EnableAuthOptions) err func (c *Sys) DisableAuth(path string) error { r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/auth/%s", path)) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -82,41 +78,41 @@ func (c *Sys) DisableAuth(path string) error { // documentation. Please refer to that documentation for more details. 
type EnableAuthOptions struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Config AuthConfigInput `json:"config" structs:"config"` - Local bool `json:"local" structs:"local"` - PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty"` - SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap"` - Options map[string]string `json:"options" structs:"options" mapstructure:"options"` + Type string `json:"type"` + Description string `json:"description"` + Config AuthConfigInput `json:"config"` + Local bool `json:"local"` + PluginName string `json:"plugin_name,omitempty"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + Options map[string]string `json:"options" mapstructure:"options"` } type AuthConfigInput struct { - DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` - PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + PluginName string `json:"plugin_name,omitempty" 
mapstructure:"plugin_name"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` } type AuthMount struct { - Type string `json:"type" structs:"type" mapstructure:"type"` - Description string `json:"description" structs:"description" mapstructure:"description"` - Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"` - Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"` - Local bool `json:"local" structs:"local" mapstructure:"local"` - SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap"` - Options map[string]string `json:"options" structs:"options" mapstructure:"options"` + Type string `json:"type" mapstructure:"type"` + Description string `json:"description" mapstructure:"description"` + Accessor string `json:"accessor" mapstructure:"accessor"` + Config AuthConfigOutput `json:"config" mapstructure:"config"` + Local bool `json:"local" mapstructure:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` + Options map[string]string `json:"options" mapstructure:"options"` } type AuthConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` - PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" 
mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"` + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go index 80f621884955..242acf96e7a1 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_capabilities.go +++ b/vendor/github.com/hashicorp/vault/api/sys_capabilities.go @@ -1,6 +1,12 @@ package api -import "fmt" +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) func (c *Sys) CapabilitiesSelf(path string) ([]string, error) { return c.Capabilities(c.c.Token(), path) @@ -22,22 +28,27 @@ func (c *Sys) Capabilities(token, path string) ([]string, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := 
c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() - var result map[string]interface{} - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } - var capabilities []string - capabilitiesRaw := result["capabilities"].([]interface{}) - for _, capability := range capabilitiesRaw { - capabilities = append(capabilities, capability.(string)) + var res []string + err = mapstructure.Decode(secret.Data[path], &res) + if err != nil { + return nil, err } - return capabilities, nil + + return res, nil } diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go index e7f2a59453c7..d153a47c3a49 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go +++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go @@ -1,15 +1,37 @@ package api +import ( + "context" + "errors" + + "github.com/mitchellh/mapstructure" +) + func (c *Sys) CORSStatus() (*CORSResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/config/cors") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + var result CORSResponse - err = resp.DecodeJSON(&result) + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + return &result, err } @@ -19,38 +41,65 @@ func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + 
defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + var result CORSResponse - err = resp.DecodeJSON(&result) + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + return &result, err } func (c *Sys) DisableCORS() (*CORSResponse, error) { r := c.c.NewRequest("DELETE", "/v1/sys/config/cors") - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + var result CORSResponse - err = resp.DecodeJSON(&result) - return &result, err + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + return &result, err } type CORSRequest struct { - AllowedOrigins string `json:"allowed_origins"` - Enabled bool `json:"enabled"` + AllowedOrigins string `json:"allowed_origins" mapstructure:"allowed_origins"` + Enabled bool `json:"enabled" mapstructure:"enabled"` } type CORSResponse struct { - AllowedOrigins string `json:"allowed_origins"` - Enabled bool `json:"enabled"` + AllowedOrigins string `json:"allowed_origins" mapstructure:"allowed_origins"` + Enabled bool `json:"enabled" mapstructure:"enabled"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go index adb5496d4e4f..66f72dff69eb 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_generate_root.go +++ b/vendor/github.com/hashicorp/vault/api/sys_generate_root.go @@ 
-1,5 +1,7 @@ package api +import "context" + func (c *Sys) GenerateRootStatus() (*GenerateRootStatusResponse, error) { return c.generateRootStatusCommon("/v1/sys/generate-root/attempt") } @@ -10,7 +12,10 @@ func (c *Sys) GenerateDROperationTokenStatus() (*GenerateRootStatusResponse, err func (c *Sys) generateRootStatusCommon(path string) (*GenerateRootStatusResponse, error) { r := c.c.NewRequest("GET", path) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -40,7 +45,9 @@ func (c *Sys) generateRootInitCommon(path, otp, pgpKey string) (*GenerateRootSta return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -61,7 +68,10 @@ func (c *Sys) GenerateDROperationTokenCancel() error { func (c *Sys) generateRootCancelCommon(path string) error { r := c.c.NewRequest("DELETE", path) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -87,7 +97,9 @@ func (c *Sys) generateRootUpdateCommon(path, shard, nonce string) (*GenerateRoot return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -107,4 +119,6 @@ type GenerateRootStatusResponse struct { EncodedToken string `json:"encoded_token"` EncodedRootToken string `json:"encoded_root_token"` PGPFingerprint string `json:"pgp_fingerprint"` + OTP string `json:"otp"` + OTPLength int `json:"otp_length"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go 
b/vendor/github.com/hashicorp/vault/api/sys_health.go index 82fd1f6f99b0..bd74e8269dfc 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_health.go +++ b/vendor/github.com/hashicorp/vault/api/sys_health.go @@ -1,5 +1,7 @@ package api +import "context" + func (c *Sys) Health() (*HealthResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/health") // If the code is 400 or above it will automatically turn into an error, @@ -9,7 +11,11 @@ func (c *Sys) Health() (*HealthResponse, error) { r.Params.Add("sealedcode", "299") r.Params.Add("standbycode", "299") r.Params.Add("drsecondarycode", "299") - resp, err := c.c.RawRequest(r) + r.Params.Add("performancestandbycode", "299") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_init.go b/vendor/github.com/hashicorp/vault/api/sys_init.go index f824ab7ddbed..0e499c6e3c63 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_init.go +++ b/vendor/github.com/hashicorp/vault/api/sys_init.go @@ -1,8 +1,13 @@ package api +import "context" + func (c *Sys) InitStatus() (bool, error) { r := c.c.NewRequest("GET", "/v1/sys/init") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return false, err } @@ -19,7 +24,9 @@ func (c *Sys) Init(opts *InitRequest) (*InitResponse, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go index 4951c46e1809..dfef8345cc5a 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leader.go +++ 
b/vendor/github.com/hashicorp/vault/api/sys_leader.go @@ -1,8 +1,13 @@ package api +import "context" + func (c *Sys) Leader() (*LeaderResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/leader") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -14,8 +19,10 @@ func (c *Sys) Leader() (*LeaderResponse, error) { } type LeaderResponse struct { - HAEnabled bool `json:"ha_enabled"` - IsSelf bool `json:"is_self"` - LeaderAddress string `json:"leader_address"` - LeaderClusterAddress string `json:"leader_cluster_address"` + HAEnabled bool `json:"ha_enabled"` + IsSelf bool `json:"is_self"` + LeaderAddress string `json:"leader_address"` + LeaderClusterAddress string `json:"leader_cluster_address"` + PerfStandby bool `json:"performance_standby"` + PerfStandbyLastRemoteWAL uint64 `json:"performance_standby_last_remote_wal"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_leases.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go index 34bd99e65223..09c9642a95d8 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_leases.go +++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go @@ -1,5 +1,10 @@ package api +import ( + "context" + "errors" +) + func (c *Sys) Renew(id string, increment int) (*Secret, error) { r := c.c.NewRequest("PUT", "/v1/sys/leases/renew") @@ -11,7 +16,9 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -22,7 +29,10 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) { func (c *Sys) Revoke(id string) error { r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := 
context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -31,7 +41,10 @@ func (c *Sys) Revoke(id string) error { func (c *Sys) RevokePrefix(id string) error { r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -40,9 +53,53 @@ func (c *Sys) RevokePrefix(id string) error { func (c *Sys) RevokeForce(id string) error { r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RevokeWithOptions(opts *RevokeOptions) error { + if opts == nil { + return errors.New("nil options provided") + } + + // Construct path + path := "/v1/sys/leases/revoke/" + switch { + case opts.Force: + path = "/v1/sys/leases/revoke-force/" + case opts.Prefix: + path = "/v1/sys/leases/revoke-prefix/" + } + path += opts.LeaseID + + r := c.c.NewRequest("PUT", path) + if !opts.Force { + body := map[string]interface{}{ + "sync": opts.Sync, + } + if err := r.SetJSONBody(body); err != nil { + return err + } + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } return err } + +type RevokeOptions struct { + LeaseID string + Force bool + Prefix bool + Sync bool +} diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go index 1d14f2175363..8a32b095e6ac 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go +++ 
b/vendor/github.com/hashicorp/vault/api/sys_mounts.go @@ -1,57 +1,50 @@ package api import ( + "context" + "errors" "fmt" - "github.com/fatih/structs" "github.com/mitchellh/mapstructure" ) func (c *Sys) ListMounts() (map[string]*MountOutput, error) { r := c.c.NewRequest("GET", "/v1/sys/mounts") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() - var result map[string]interface{} - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } mounts := map[string]*MountOutput{} - for k, v := range result { - switch v.(type) { - case map[string]interface{}: - default: - continue - } - var res MountOutput - err = mapstructure.Decode(v, &res) - if err != nil { - return nil, err - } - // Not a mount, some other api.Secret data - if res.Type == "" { - continue - } - mounts[k] = &res + err = mapstructure.Decode(secret.Data, &mounts) + if err != nil { + return nil, err } return mounts, nil } func (c *Sys) Mount(path string, mountInfo *MountInput) error { - body := structs.Map(mountInfo) - r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s", path)) - if err := r.SetJSONBody(body); err != nil { + if err := r.SetJSONBody(mountInfo); err != nil { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } @@ -62,7 +55,10 @@ func (c *Sys) Mount(path string, mountInfo *MountInput) error { func (c *Sys) Unmount(path string) error { r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/mounts/%s", path)) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer 
cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -80,7 +76,9 @@ func (c *Sys) Remount(from, to string) error { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -88,13 +86,14 @@ func (c *Sys) Remount(from, to string) error { } func (c *Sys) TuneMount(path string, config MountConfigInput) error { - body := structs.Map(config) r := c.c.NewRequest("POST", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) - if err := r.SetJSONBody(body); err != nil { + if err := r.SetJSONBody(config); err != nil { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -104,14 +103,24 @@ func (c *Sys) TuneMount(path string, config MountConfigInput) error { func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/mounts/%s/tune", path)) - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + var result MountConfigOutput - err = resp.DecodeJSON(&result) + err = mapstructure.Decode(secret.Data, &result) if err != nil { return nil, err } @@ -120,44 +129,45 @@ func (c *Sys) MountConfig(path string) (*MountConfigOutput, error) { } type MountInput struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Config MountConfigInput 
`json:"config" structs:"config"` - Options map[string]string `json:"options" structs:"options"` - Local bool `json:"local" structs:"local"` - PluginName string `json:"plugin_name,omitempty" structs:"plugin_name"` - SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap"` + Type string `json:"type"` + Description string `json:"description"` + Config MountConfigInput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + PluginName string `json:"plugin_name,omitempty"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` } type MountConfigInput struct { - Options map[string]string `json:"options" structs:"options" mapstructure:"options"` - DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` - PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"` + Options map[string]string `json:"options" mapstructure:"options"` + DefaultLeaseTTL string `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + Description *string `json:"description,omitempty" mapstructure:"description"` + MaxLeaseTTL 
string `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` } type MountOutput struct { - Type string `json:"type" structs:"type"` - Description string `json:"description" structs:"description"` - Accessor string `json:"accessor" structs:"accessor"` - Config MountConfigOutput `json:"config" structs:"config"` - Options map[string]string `json:"options" structs:"options"` - Local bool `json:"local" structs:"local"` - SealWrap bool `json:"seal_wrap" structs:"seal_wrap" mapstructure:"seal_wrap"` + Type string `json:"type"` + Description string `json:"description"` + Accessor string `json:"accessor"` + Config MountConfigOutput `json:"config"` + Options map[string]string `json:"options"` + Local bool `json:"local"` + SealWrap bool `json:"seal_wrap" mapstructure:"seal_wrap"` } type MountConfigOutput struct { - DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"` - MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"` - ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"` - PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"` - AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" structs:"audit_non_hmac_request_keys" 
mapstructure:"audit_non_hmac_request_keys"` - AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" structs:"audit_non_hmac_response_keys" mapstructure:"audit_non_hmac_response_keys"` - ListingVisibility string `json:"listing_visibility,omitempty" structs:"listing_visibility" mapstructure:"listing_visibility"` - PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" structs:"passthrough_request_headers" mapstructure:"passthrough_request_headers"` + DefaultLeaseTTL int `json:"default_lease_ttl" mapstructure:"default_lease_ttl"` + MaxLeaseTTL int `json:"max_lease_ttl" mapstructure:"max_lease_ttl"` + ForceNoCache bool `json:"force_no_cache" mapstructure:"force_no_cache"` + PluginName string `json:"plugin_name,omitempty" mapstructure:"plugin_name"` + AuditNonHMACRequestKeys []string `json:"audit_non_hmac_request_keys,omitempty" mapstructure:"audit_non_hmac_request_keys"` + AuditNonHMACResponseKeys []string `json:"audit_non_hmac_response_keys,omitempty" mapstructure:"audit_non_hmac_response_keys"` + ListingVisibility string `json:"listing_visibility,omitempty" mapstructure:"listing_visibility"` + PassthroughRequestHeaders []string `json:"passthrough_request_headers,omitempty" mapstructure:"passthrough_request_headers"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_plugins.go b/vendor/github.com/hashicorp/vault/api/sys_plugins.go index 8183b10f5b77..b2f18d94d769 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_plugins.go +++ b/vendor/github.com/hashicorp/vault/api/sys_plugins.go @@ -1,6 +1,7 @@ package api import ( + "context" "fmt" "net/http" ) @@ -11,7 +12,7 @@ type ListPluginsInput struct{} // ListPluginsResponse is the response from the ListPlugins call. type ListPluginsResponse struct { // Names is the list of names of the plugins. 
- Names []string + Names []string `json:"names"` } // ListPlugins lists all plugins in the catalog and returns their names as a @@ -19,7 +20,10 @@ type ListPluginsResponse struct { func (c *Sys) ListPlugins(i *ListPluginsInput) (*ListPluginsResponse, error) { path := "/v1/sys/plugins/catalog" req := c.c.NewRequest("LIST", path) - resp, err := c.c.RawRequest(req) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) if err != nil { return nil, err } @@ -54,18 +58,23 @@ type GetPluginResponse struct { func (c *Sys) GetPlugin(i *GetPluginInput) (*GetPluginResponse, error) { path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name) req := c.c.NewRequest(http.MethodGet, path) - resp, err := c.c.RawRequest(req) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) if err != nil { return nil, err } defer resp.Body.Close() - var result GetPluginResponse + var result struct { + Data GetPluginResponse + } err = resp.DecodeJSON(&result) if err != nil { return nil, err } - return &result, err + return &result.Data, err } // RegisterPluginInput is used as input to the RegisterPlugin function. 
@@ -91,7 +100,9 @@ func (c *Sys) RegisterPlugin(i *RegisterPluginInput) error { return err } - resp, err := c.c.RawRequest(req) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) if err == nil { defer resp.Body.Close() } @@ -109,7 +120,10 @@ type DeregisterPluginInput struct { func (c *Sys) DeregisterPlugin(i *DeregisterPluginInput) error { path := fmt.Sprintf("/v1/sys/plugins/catalog/%s", i.Name) req := c.c.NewRequest(http.MethodDelete, path) - resp, err := c.c.RawRequest(req) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, req) if err == nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/sys_policy.go b/vendor/github.com/hashicorp/vault/api/sys_policy.go index 9c9d9c08b125..1fa32597efb2 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_policy.go +++ b/vendor/github.com/hashicorp/vault/api/sys_policy.go @@ -1,39 +1,47 @@ package api -import "fmt" +import ( + "context" + "errors" + "fmt" + + "github.com/mitchellh/mapstructure" +) func (c *Sys) ListPolicies() ([]string, error) { r := c.c.NewRequest("GET", "/v1/sys/policy") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() - var result map[string]interface{} - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return nil, err } - - var ok bool - if _, ok = result["policies"]; !ok { - return nil, fmt.Errorf("policies not found in response") + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") } - listRaw := result["policies"].([]interface{}) - var policies []string - - for _, val := range listRaw { - policies = append(policies, val.(string)) + var result 
[]string + err = mapstructure.Decode(secret.Data["policies"], &result) + if err != nil { + return nil, err } - return policies, err + return result, err } func (c *Sys) GetPolicy(name string) (string, error) { - r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policy/%s", name)) - resp, err := c.c.RawRequest(r) + r := c.c.NewRequest("GET", fmt.Sprintf("/v1/sys/policies/acl/%s", name)) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if resp != nil { defer resp.Body.Close() if resp.StatusCode == 404 { @@ -44,16 +52,15 @@ func (c *Sys) GetPolicy(name string) (string, error) { return "", err } - var result map[string]interface{} - err = resp.DecodeJSON(&result) + secret, err := ParseSecret(resp.Body) if err != nil { return "", err } - - if rulesRaw, ok := result["rules"]; ok { - return rulesRaw.(string), nil + if secret == nil || secret.Data == nil { + return "", errors.New("data from server response is empty") } - if policyRaw, ok := result["policy"]; ok { + + if policyRaw, ok := secret.Data["policy"]; ok { return policyRaw.(string), nil } @@ -70,7 +77,9 @@ func (c *Sys) PutPolicy(name, rules string) error { return err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return err } @@ -81,7 +90,10 @@ func (c *Sys) PutPolicy(name, rules string) error { func (c *Sys) DeletePolicy(name string) error { r := c.c.NewRequest("DELETE", fmt.Sprintf("/v1/sys/policy/%s", name)) - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } diff --git a/vendor/github.com/hashicorp/vault/api/sys_rekey.go b/vendor/github.com/hashicorp/vault/api/sys_rekey.go index 8b2d0435d042..55f1a703d41c 100644 --- 
a/vendor/github.com/hashicorp/vault/api/sys_rekey.go +++ b/vendor/github.com/hashicorp/vault/api/sys_rekey.go @@ -1,8 +1,18 @@ package api +import ( + "context" + "errors" + + "github.com/mitchellh/mapstructure" +) + func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/init") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -15,7 +25,10 @@ func (c *Sys) RekeyStatus() (*RekeyStatusResponse, error) { func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/init") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -26,13 +39,47 @@ func (c *Sys) RekeyRecoveryKeyStatus() (*RekeyStatusResponse, error) { return &result, err } +func (c *Sys) RekeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationStatus() (*RekeyVerificationStatusResponse, error) { + r := c.c.NewRequest("GET", "/v1/sys/rekey-recovery-key/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationStatusResponse + err = resp.DecodeJSON(&result) + return &result, err +} + func (c *Sys) RekeyInit(config 
*RekeyInitRequest) (*RekeyStatusResponse, error) { r := c.c.NewRequest("PUT", "/v1/sys/rekey/init") if err := r.SetJSONBody(config); err != nil { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -49,7 +96,9 @@ func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusRespon return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -62,7 +111,10 @@ func (c *Sys) RekeyRecoveryKeyInit(config *RekeyInitRequest) (*RekeyStatusRespon func (c *Sys) RekeyCancel() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/init") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -71,7 +123,34 @@ func (c *Sys) RekeyCancel() error { func (c *Sys) RekeyRecoveryKeyCancel() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/init") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyVerificationCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err == nil { + defer resp.Body.Close() + } + return err +} + +func (c *Sys) RekeyRecoveryKeyVerificationCancel() error { + r := c.c.NewRequest("DELETE", "/v1/sys/rekey-recovery-key/verify") + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + 
resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -89,7 +168,9 @@ func (c *Sys) RekeyUpdate(shard, nonce string) (*RekeyUpdateResponse, error) { return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -111,7 +192,9 @@ func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, return nil, err } - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } @@ -124,33 +207,66 @@ func (c *Sys) RekeyRecoveryKeyUpdate(shard, nonce string) (*RekeyUpdateResponse, func (c *Sys) RekeyRetrieveBackup() (*RekeyRetrieveResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/backup") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + var result RekeyRetrieveResponse - err = resp.DecodeJSON(&result) + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + return &result, err } func (c *Sys) RekeyRetrieveRecoveryBackup() (*RekeyRetrieveResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/rekey/recovery-backup") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret 
== nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + var result RekeyRetrieveResponse - err = resp.DecodeJSON(&result) + err = mapstructure.Decode(secret.Data, &result) + if err != nil { + return nil, err + } + return &result, err } func (c *Sys) RekeyDeleteBackup() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/backup") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -160,7 +276,10 @@ func (c *Sys) RekeyDeleteBackup() error { func (c *Sys) RekeyDeleteRecoveryBackup() error { r := c.c.NewRequest("DELETE", "/v1/sys/rekey/recovery-backup") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -168,36 +287,102 @@ func (c *Sys) RekeyDeleteRecoveryBackup() error { return err } +func (c *Sys) RekeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + +func (c *Sys) RekeyRecoveryKeyVerificationUpdate(shard, nonce string) (*RekeyVerificationUpdateResponse, error) { + body := map[string]interface{}{ + "key": shard, + "nonce": nonce, + } + + r := c.c.NewRequest("PUT", "/v1/sys/rekey-recovery-key/verify") + if err := r.SetJSONBody(body); err != nil { + return nil, err + } + + ctx, 
cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result RekeyVerificationUpdateResponse + err = resp.DecodeJSON(&result) + return &result, err +} + type RekeyInitRequest struct { - SecretShares int `json:"secret_shares"` - SecretThreshold int `json:"secret_threshold"` - StoredShares int `json:"stored_shares"` - PGPKeys []string `json:"pgp_keys"` - Backup bool + SecretShares int `json:"secret_shares"` + SecretThreshold int `json:"secret_threshold"` + StoredShares int `json:"stored_shares"` + PGPKeys []string `json:"pgp_keys"` + Backup bool + RequireVerification bool `json:"require_verification"` } type RekeyStatusResponse struct { - Nonce string `json:"nonce"` - Started bool `json:"started"` - T int `json:"t"` - N int `json:"n"` - Progress int `json:"progress"` - Required int `json:"required"` - PGPFingerprints []string `json:"pgp_fingerprints"` - Backup bool `json:"backup"` + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` + Required int `json:"required"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce"` } type RekeyUpdateResponse struct { - Nonce string `json:"nonce"` - Complete bool `json:"complete"` - Keys []string `json:"keys"` - KeysB64 []string `json:"keys_base64"` - PGPFingerprints []string `json:"pgp_fingerprints"` - Backup bool `json:"backup"` + Nonce string `json:"nonce"` + Complete bool `json:"complete"` + Keys []string `json:"keys"` + KeysB64 []string `json:"keys_base64"` + PGPFingerprints []string `json:"pgp_fingerprints"` + Backup bool `json:"backup"` + VerificationRequired bool `json:"verification_required"` + VerificationNonce string `json:"verification_nonce,omitempty"` } 
type RekeyRetrieveResponse struct { - Nonce string `json:"nonce"` - Keys map[string][]string `json:"keys"` - KeysB64 map[string][]string `json:"keys_base64"` + Nonce string `json:"nonce" mapstructure:"nonce"` + Keys map[string][]string `json:"keys" mapstructure:"keys"` + KeysB64 map[string][]string `json:"keys_base64" mapstructure:"keys_base64"` +} + +type RekeyVerificationStatusResponse struct { + Nonce string `json:"nonce"` + Started bool `json:"started"` + T int `json:"t"` + N int `json:"n"` + Progress int `json:"progress"` +} + +type RekeyVerificationUpdateResponse struct { + Nonce string `json:"nonce"` + Complete bool `json:"complete"` } diff --git a/vendor/github.com/hashicorp/vault/api/sys_rotate.go b/vendor/github.com/hashicorp/vault/api/sys_rotate.go index 8108dced8286..c525feb00d3a 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_rotate.go +++ b/vendor/github.com/hashicorp/vault/api/sys_rotate.go @@ -1,10 +1,18 @@ package api -import "time" +import ( + "context" + "encoding/json" + "errors" + "time" +) func (c *Sys) Rotate() error { r := c.c.NewRequest("POST", "/v1/sys/rotate") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -13,15 +21,54 @@ func (c *Sys) Rotate() error { func (c *Sys) KeyStatus() (*KeyStatus, error) { r := c.c.NewRequest("GET", "/v1/sys/key-status") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } defer resp.Body.Close() - result := new(KeyStatus) - err = resp.DecodeJSON(result) - return result, err + secret, err := ParseSecret(resp.Body) + if err != nil { + return nil, err + } + if secret == nil || secret.Data == nil { + return nil, errors.New("data from server response is empty") + } + + var result KeyStatus + + 
termRaw, ok := secret.Data["term"] + if !ok { + return nil, errors.New("term not found in response") + } + term, ok := termRaw.(json.Number) + if !ok { + return nil, errors.New("could not convert term to a number") + } + term64, err := term.Int64() + if err != nil { + return nil, err + } + result.Term = int(term64) + + installTimeRaw, ok := secret.Data["install_time"] + if !ok { + return nil, errors.New("install_time not found in response") + } + installTimeStr, ok := installTimeRaw.(string) + if !ok { + return nil, errors.New("could not convert install_time to a string") + } + installTime, err := time.Parse(time.RFC3339Nano, installTimeStr) + if err != nil { + return nil, err + } + result.InstallTime = installTime + + return &result, err } type KeyStatus struct { diff --git a/vendor/github.com/hashicorp/vault/api/sys_seal.go b/vendor/github.com/hashicorp/vault/api/sys_seal.go index 3d594baf914b..7cc32ac33c8d 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_seal.go +++ b/vendor/github.com/hashicorp/vault/api/sys_seal.go @@ -1,5 +1,7 @@ package api +import "context" + func (c *Sys) SealStatus() (*SealStatusResponse, error) { r := c.c.NewRequest("GET", "/v1/sys/seal-status") return sealStatusRequest(c, r) @@ -7,7 +9,10 @@ func (c *Sys) SealStatus() (*SealStatusResponse, error) { func (c *Sys) Seal() error { r := c.c.NewRequest("PUT", "/v1/sys/seal") - resp, err := c.c.RawRequest(r) + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err == nil { defer resp.Body.Close() } @@ -37,7 +42,9 @@ func (c *Sys) Unseal(shard string) (*SealStatusResponse, error) { } func sealStatusRequest(c *Sys, r *Request) (*SealStatusResponse, error) { - resp, err := c.c.RawRequest(r) + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) if err != nil { return nil, err } diff --git 
a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go index 421e5f19fb96..55dc6fbcb7bd 100644 --- a/vendor/github.com/hashicorp/vault/api/sys_stepdown.go +++ b/vendor/github.com/hashicorp/vault/api/sys_stepdown.go @@ -1,10 +1,15 @@ package api +import "context" + func (c *Sys) StepDown() error { r := c.c.NewRequest("PUT", "/v1/sys/step-down") - resp, err := c.c.RawRequest(r) - if err == nil { - defer resp.Body.Close() + + ctx, cancelFunc := context.WithCancel(context.Background()) + defer cancelFunc() + resp, err := c.c.RawRequestWithContext(ctx, r) + if resp != nil && resp.Body != nil { + resp.Body.Close() } return err } diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go index 4acebe31ca39..a7fb87bcffff 100644 --- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go +++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go @@ -8,6 +8,7 @@ import ( "io" "github.com/golang/snappy" + "github.com/hashicorp/errwrap" ) const ( @@ -107,7 +108,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { } if err != nil { - return nil, fmt.Errorf("failed to create a compression writer; err: %v", err) + return nil, errwrap.Wrapf("failed to create a compression writer: {{err}}", err) } if writer == nil { @@ -117,7 +118,7 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) { // Compress the input and place it in the same buffer containing the // canary byte. 
if _, err = writer.Write(data); err != nil { - return nil, fmt.Errorf("failed to compress input data; err: %v", err) + return nil, errwrap.Wrapf("failed to compress input data: err: {{err}}", err) } // Close the io.WriteCloser @@ -172,7 +173,7 @@ func Decompress(data []byte) ([]byte, bool, error) { return nil, true, nil } if err != nil { - return nil, false, fmt.Errorf("failed to create a compression reader; err: %v", err) + return nil, false, errwrap.Wrapf("failed to create a compression reader: {{err}}", err) } if reader == nil { return nil, false, fmt.Errorf("failed to create a compression reader") diff --git a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go index b560279bdc84..d03ddef5f068 100644 --- a/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go +++ b/vendor/github.com/hashicorp/vault/helper/jsonutil/json.go @@ -7,6 +7,7 @@ import ( "fmt" "io" + "github.com/hashicorp/errwrap" "github.com/hashicorp/vault/helper/compressutil" ) @@ -64,7 +65,7 @@ func DecodeJSON(data []byte, out interface{}) error { // Decompress the data if it was compressed in the first place decompressedBytes, uncompressed, err := compressutil.Decompress(data) if err != nil { - return fmt.Errorf("failed to decompress JSON: err: %v", err) + return errwrap.Wrapf("failed to decompress JSON: {{err}}", err) } if !uncompressed && (decompressedBytes == nil || len(decompressedBytes) == 0) { return fmt.Errorf("decompressed data being decoded is invalid") diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go index 464b50899cf9..9b32bf7df444 100644 --- a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go +++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go @@ -3,10 +3,13 @@ package parseutil import ( "encoding/json" "errors" + "fmt" "strconv" "strings" "time" + "github.com/hashicorp/errwrap" + sockaddr 
"github.com/hashicorp/go-sockaddr" "github.com/hashicorp/vault/helper/strutil" "github.com/mitchellh/mapstructure" ) @@ -25,7 +28,7 @@ func ParseDurationSecond(in interface{}) (time.Duration, error) { } var err error // Look for a suffix otherwise its a plain second value - if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") { + if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") || strings.HasSuffix(inp, "ms") { dur, err = time.ParseDuration(inp) if err != nil { return dur, err @@ -118,3 +121,43 @@ func ParseCommaStringSlice(in interface{}) ([]string, error) { } return strutil.TrimStrings(result), nil } + +func ParseAddrs(addrs interface{}) ([]*sockaddr.SockAddrMarshaler, error) { + out := make([]*sockaddr.SockAddrMarshaler, 0) + stringAddrs := make([]string, 0) + + switch addrs.(type) { + case string: + stringAddrs = strutil.ParseArbitraryStringSlice(addrs.(string), ",") + if len(stringAddrs) == 0 { + return nil, fmt.Errorf("unable to parse addresses from %v", addrs) + } + + case []string: + stringAddrs = addrs.([]string) + + case []interface{}: + for _, v := range addrs.([]interface{}) { + stringAddr, ok := v.(string) + if !ok { + return nil, fmt.Errorf("error parsing %v as string", v) + } + stringAddrs = append(stringAddrs, stringAddr) + } + + default: + return nil, fmt.Errorf("unknown address input type %T", addrs) + } + + for _, addr := range stringAddrs { + sa, err := sockaddr.NewSockAddr(addr) + if err != nil { + return nil, errwrap.Wrapf(fmt.Sprintf("error parsing address %q: {{err}}", addr), err) + } + out = append(out, &sockaddr.SockAddrMarshaler{ + SockAddr: sa, + }) + } + + return out, nil +} diff --git a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go index ec6166cc7c78..8d84c1e47dee 100644 --- a/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go +++ 
b/vendor/github.com/hashicorp/vault/helper/strutil/strutil.go @@ -7,6 +7,7 @@ import ( "sort" "strings" + "github.com/hashicorp/errwrap" glob "github.com/ryanuber/go-glob" ) @@ -42,9 +43,9 @@ func StrListSubset(super, sub []string) bool { return true } -// Parses a comma separated list of strings into a slice of strings. -// The return slice will be sorted and will not contain duplicate or -// empty items. +// ParseDedupAndSortStrings parses a comma separated list of strings +// into a slice of strings. The return slice will be sorted and will +// not contain duplicate or empty items. func ParseDedupAndSortStrings(input string, sep string) []string { input = strings.TrimSpace(input) parsed := []string{} @@ -55,9 +56,10 @@ func ParseDedupAndSortStrings(input string, sep string) []string { return RemoveDuplicates(strings.Split(input, sep), false) } -// Parses a comma separated list of strings into a slice of strings. -// The return slice will be sorted and will not contain duplicate or -// empty items. The values will be converted to lower case. +// ParseDedupLowercaseAndSortStrings parses a comma separated list of +// strings into a slice of strings. The return slice will be sorted and +// will not contain duplicate or empty items. The values will be converted +// to lower case. func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { input = strings.TrimSpace(input) parsed := []string{} @@ -68,8 +70,8 @@ func ParseDedupLowercaseAndSortStrings(input string, sep string) []string { return RemoveDuplicates(strings.Split(input, sep), true) } -// Parses a comma separated list of `=` tuples into a -// map[string]string. +// ParseKeyValues parses a comma separated list of `=` tuples +// into a map[string]string. 
func ParseKeyValues(input string, out map[string]string, sep string) error { if out == nil { return fmt.Errorf("'out is nil") @@ -89,15 +91,15 @@ func ParseKeyValues(input string, out map[string]string, sep string) error { key := strings.TrimSpace(shards[0]) value := strings.TrimSpace(shards[1]) if key == "" || value == "" { - return fmt.Errorf("invalid pair: key:'%s' value:'%s'", key, value) + return fmt.Errorf("invalid pair: key: %q value: %q", key, value) } out[key] = value } return nil } -// Parses arbitrary tuples. The input can be one of -// the following: +// ParseArbitraryKeyValues parses arbitrary tuples. The input +// can be one of the following: // * JSON string // * Base64 encoded JSON string // * Comma separated list of `=` pairs @@ -129,22 +131,22 @@ func ParseArbitraryKeyValues(input string, out map[string]string, sep string) er // If JSON unmarshalling fails, consider that the input was // supplied as a comma separated string of 'key=value' pairs. if err = ParseKeyValues(input, out, sep); err != nil { - return fmt.Errorf("failed to parse the input: %v", err) + return errwrap.Wrapf("failed to parse the input: {{err}}", err) } } // Validate the parsed input for key, value := range out { if key != "" && value == "" { - return fmt.Errorf("invalid value for key '%s'", key) + return fmt.Errorf("invalid value for key %q", key) } } return nil } -// Parses a `sep`-separated list of strings into a -// []string. +// ParseStringSlice parses a `sep`-separated list of strings into a +// []string with surrounding whitespace removed. // // The output will always be a valid slice but may be of length zero. func ParseStringSlice(input string, sep string) []string { @@ -156,14 +158,14 @@ func ParseStringSlice(input string, sep string) []string { splitStr := strings.Split(input, sep) ret := make([]string, len(splitStr)) for i, val := range splitStr { - ret[i] = val + ret[i] = strings.TrimSpace(val) } return ret } -// Parses arbitrary string slice. 
The input can be one of -// the following: +// ParseArbitraryStringSlice parses arbitrary string slice. The input +// can be one of the following: // * JSON string // * Base64 encoded JSON string // * `sep` separated list of values @@ -214,8 +216,9 @@ func TrimStrings(items []string) []string { return ret } -// Removes duplicate and empty elements from a slice of strings. This also may -// convert the items in the slice to lower case and returns a sorted slice. +// RemoveDuplicates removes duplicate and empty elements from a slice of +// strings. This also may convert the items in the slice to lower case and +// returns a sorted slice. func RemoveDuplicates(items []string, lowercase bool) []string { itemsMap := map[string]bool{} for _, item := range items { @@ -229,7 +232,7 @@ func RemoveDuplicates(items []string, lowercase bool) []string { itemsMap[item] = true } items = make([]string, 0, len(itemsMap)) - for item, _ := range itemsMap { + for item := range itemsMap { items = append(items, item) } sort.Strings(items) @@ -259,10 +262,10 @@ func EquivalentSlices(a, b []string) bool { // Now we'll build our checking slices var sortedA, sortedB []string - for keyA, _ := range mapA { + for keyA := range mapA { sortedA = append(sortedA, keyA) } - for keyB, _ := range mapB { + for keyB := range mapB { sortedB = append(sortedB, keyB) } sort.Strings(sortedA) @@ -298,6 +301,8 @@ func StrListDelete(s []string, d string) []string { return s } +// GlobbedStringsMatch compares item to val with support for a leading and/or +// trailing wildcard '*' in item. 
func GlobbedStringsMatch(item, val string) bool { if len(item) < 2 { return val == item @@ -324,3 +329,20 @@ func AppendIfMissing(slice []string, i string) []string { } return append(slice, i) } + +// MergeSlices adds an arbitrary number of slices together, uniquely +func MergeSlices(args ...[]string) []string { + all := map[string]struct{}{} + for _, slice := range args { + for _, v := range slice { + all[v] = struct{}{} + } + } + + result := make([]string, 0, len(all)) + for k, _ := range all { + result = append(result, k) + } + sort.Strings(result) + return result +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 2a3aafaada9f..c2463e23cc5c 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -196,11 +196,11 @@ {"path":"github.com/hashicorp/serf/coordinate","checksumSHA1":"0PeWsO2aI+2PgVYlYlDPKfzCLEQ=","revision":"80ab48778deee28e4ea2dc4ef1ebb2c5f4063996","revisionTime":"2018-05-07T23:19:28Z"}, {"path":"github.com/hashicorp/serf/serf","checksumSHA1":"QrT+nzyXsD/MmhTjjhcPdnALZ1I=","revision":"80ab48778deee28e4ea2dc4ef1ebb2c5f4063996","revisionTime":"2018-05-07T23:19:28Z"}, {"path":"github.com/hashicorp/vault","checksumSHA1":"eGzvBRMFD6ZB3A6uO750np7Om/E=","revision":"182ba68a9589d4cef95234134aaa498a686e3de3","revisionTime":"2016-08-21T23:40:57Z"}, - {"path":"github.com/hashicorp/vault/api","checksumSHA1":"mKN4rEIWyflT6aqJyjgu9m1tPXI=","revision":"3ddd3bd20cec0588788547aecd15e91461b9d546","revisionTime":"2018-04-03T21:11:47Z"}, - {"path":"github.com/hashicorp/vault/helper/compressutil","checksumSHA1":"jHVLe8KMdEpb/ZALp0zu+tenADo=","revision":"3ddd3bd20cec0588788547aecd15e91461b9d546","revisionTime":"2018-04-03T21:11:47Z"}, - {"path":"github.com/hashicorp/vault/helper/jsonutil","checksumSHA1":"TEViSweHazfDVJ/4Y+luMnNMiqY=","revision":"3ddd3bd20cec0588788547aecd15e91461b9d546","revisionTime":"2018-04-03T21:11:47Z"}, - 
{"path":"github.com/hashicorp/vault/helper/parseutil","checksumSHA1":"6OrIfQ/Lr5hNyZ9oU/JQvfd2Bto=","revision":"3ddd3bd20cec0588788547aecd15e91461b9d546","revisionTime":"2018-04-03T21:11:47Z"}, - {"path":"github.com/hashicorp/vault/helper/strutil","checksumSHA1":"rXiSGn0TsznSSCvVlt7fvXKMF1M=","revision":"3ddd3bd20cec0588788547aecd15e91461b9d546","revisionTime":"2018-04-03T21:11:47Z"}, + {"path":"github.com/hashicorp/vault/api","checksumSHA1":"+B4wuJNerIUKNAVzld7CmMaNW5A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, + {"path":"github.com/hashicorp/vault/helper/compressutil","checksumSHA1":"bSdPFOHaTwEvM4PIvn0PZfn75jM=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, + {"path":"github.com/hashicorp/vault/helper/jsonutil","checksumSHA1":"POgkM3GrjRFw6H3sw95YNEs552A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, + {"path":"github.com/hashicorp/vault/helper/parseutil","checksumSHA1":"HA2MV/2XI0HcoThSRxQCaBZR2ps=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, + {"path":"github.com/hashicorp/vault/helper/strutil","checksumSHA1":"HdVuYhZ5TuxeIFqi0jy2GHW7a4o=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/yamux","checksumSHA1":"NnWv17i1tpvBNJtpdRRWpE6j4LY=","revision":"2658be15c5f05e76244154714161f17e3e77de2e","revisionTime":"2018-03-14T20:07:45Z"}, {"path":"github.com/hpcloud/tail/util","checksumSHA1":"0xM336Lb25URO/1W1/CtGoRygVU=","revision":"37f4271387456dd1bf82ab1ad9229f060cc45386","revisionTime":"2017-08-14T16:06:53Z"}, {"path":"github.com/hpcloud/tail/watch","checksumSHA1":"TP4OAv5JMtzj2TB6OQBKqauaKDc=","revision":"37f4271387456dd1bf82ab1ad9229f060cc45386","revisionTime":"2017-08-14T16:06:53Z"}, From 8ebb17f54debcd4923887c9f7cbbb38e08f00502 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 10 Sep 
2018 15:02:05 -0700 Subject: [PATCH 2/6] Update retryablehttp --- .../hashicorp/go-retryablehttp/README.md | 13 +- .../hashicorp/go-retryablehttp/client.go | 298 +++++++++++++++--- vendor/vendor.json | 2 +- 3 files changed, 257 insertions(+), 56 deletions(-) diff --git a/vendor/github.com/hashicorp/go-retryablehttp/README.md b/vendor/github.com/hashicorp/go-retryablehttp/README.md index 0d6f9ed40afd..ccdc7e87cad7 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/README.md +++ b/vendor/github.com/hashicorp/go-retryablehttp/README.md @@ -14,13 +14,16 @@ makes `retryablehttp` very easy to drop into existing programs. `retryablehttp` performs automatic retries under certain conditions. Mainly, if an error is returned by the client (connection errors, etc.), or if a 500-range -response code is received, then a retry is invoked after a wait period. -Otherwise, the response is returned and left to the caller to interpret. +response code is received (except 501), then a retry is invoked after a wait +period. Otherwise, the response is returned and left to the caller to +interpret. The main difference from `net/http` is that requests which take a request body -(POST/PUT et. al) require an `io.ReadSeeker` to be provided. This enables the -request body to be "rewound" if the initial request fails so that the full -request can be attempted again. +(POST/PUT et. al) can have the body provided in a number of ways (some more or +less efficient) that allow "rewinding" the request body if the initial request +fails so that the full request can be attempted again. See the +[godoc](http://godoc.org/github.com/hashicorp/go-retryablehttp) for more +details. 
Example Use =========== diff --git a/vendor/github.com/hashicorp/go-retryablehttp/client.go b/vendor/github.com/hashicorp/go-retryablehttp/client.go index 198779bdf978..21f45e5ed647 100644 --- a/vendor/github.com/hashicorp/go-retryablehttp/client.go +++ b/vendor/github.com/hashicorp/go-retryablehttp/client.go @@ -8,18 +8,28 @@ // response is received, then a retry is invoked. Otherwise, the response is // returned and left to the caller to interpret. // -// The main difference from net/http is that requests which take a request body -// (POST/PUT et. al) require an io.ReadSeeker to be provided. This enables the -// request body to be "rewound" if the initial request fails so that the full -// request can be attempted again. +// Requests which take a request body should provide a non-nil function +// parameter. The best choice is to provide either a function satisfying +// ReaderFunc which provides multiple io.Readers in an efficient manner, a +// *bytes.Buffer (the underlying raw byte slice will be used) or a raw byte +// slice. As it is a reference type, and we will wrap it as needed by readers, +// we can efficiently re-use the request body without needing to copy it. If an +// io.Reader (such as a *bytes.Reader) is provided, the full body will be read +// prior to the first request, and will be efficiently re-used for any retries. +// ReadSeeker can be used, but some users have observed occasional data races +// between the net/http library and the Seek functionality of some +// implementations of ReadSeeker, so should be avoided if possible. package retryablehttp import ( + "bytes" + "context" "fmt" "io" "io/ioutil" "log" "math" + "math/rand" "net/http" "net/url" "os" @@ -44,6 +54,9 @@ var ( respReadLimit = int64(4096) ) +// ReaderFunc is the type of function that can be given natively to NewRequest +type ReaderFunc func() (io.Reader, error) + // LenReader is an interface implemented by many in-memory io.Reader's. 
Used // for automatically sending the right Content-Length header when possible. type LenReader interface { @@ -54,32 +67,118 @@ type LenReader interface { type Request struct { // body is a seekable reader over the request body payload. This is // used to rewind the request data in between retries. - body io.ReadSeeker + body ReaderFunc // Embed an HTTP request directly. This makes a *Request act exactly // like an *http.Request so that all meta methods are supported. *http.Request } +// WithContext returns wrapped Request with a shallow copy of underlying *http.Request +// with its context changed to ctx. The provided ctx must be non-nil. +func (r *Request) WithContext(ctx context.Context) *Request { + r.Request = r.Request.WithContext(ctx) + return r +} + // NewRequest creates a new wrapped request. -func NewRequest(method, url string, body io.ReadSeeker) (*Request, error) { - // Wrap the body in a noop ReadCloser if non-nil. This prevents the - // reader from being closed by the HTTP client. - var rcBody io.ReadCloser - if body != nil { - rcBody = ioutil.NopCloser(body) +func NewRequest(method, url string, rawBody interface{}) (*Request, error) { + var err error + var body ReaderFunc + var contentLength int64 + + if rawBody != nil { + switch rawBody.(type) { + // If they gave us a function already, great! Use it. 
+ case ReaderFunc: + body = rawBody.(ReaderFunc) + tmp, err := body() + if err != nil { + return nil, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + case func() (io.Reader, error): + body = rawBody.(func() (io.Reader, error)) + tmp, err := body() + if err != nil { + return nil, err + } + if lr, ok := tmp.(LenReader); ok { + contentLength = int64(lr.Len()) + } + if c, ok := tmp.(io.Closer); ok { + c.Close() + } + + // If a regular byte slice, we can read it over and over via new + // readers + case []byte: + buf := rawBody.([]byte) + body = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // If a bytes.Buffer we can read the underlying byte slice over and + // over + case *bytes.Buffer: + buf := rawBody.(*bytes.Buffer) + body = func() (io.Reader, error) { + return bytes.NewReader(buf.Bytes()), nil + } + contentLength = int64(buf.Len()) + + // We prioritize *bytes.Reader here because we don't really want to + // deal with it seeking so want it to match here instead of the + // io.ReadSeeker case. 
+ case *bytes.Reader: + buf, err := ioutil.ReadAll(rawBody.(*bytes.Reader)) + if err != nil { + return nil, err + } + body = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + // Compat case + case io.ReadSeeker: + raw := rawBody.(io.ReadSeeker) + body = func() (io.Reader, error) { + raw.Seek(0, 0) + return ioutil.NopCloser(raw), nil + } + if lr, ok := raw.(LenReader); ok { + contentLength = int64(lr.Len()) + } + + // Read all in so we can reset + case io.Reader: + buf, err := ioutil.ReadAll(rawBody.(io.Reader)) + if err != nil { + return nil, err + } + body = func() (io.Reader, error) { + return bytes.NewReader(buf), nil + } + contentLength = int64(len(buf)) + + default: + return nil, fmt.Errorf("cannot handle type %T", rawBody) + } } - // Make the request with the noop-closer for the body. - httpReq, err := http.NewRequest(method, url, rcBody) + httpReq, err := http.NewRequest(method, url, nil) if err != nil { return nil, err } - - // Check if we can set the Content-Length automatically. - if lr, ok := body.(LenReader); ok { - httpReq.ContentLength = int64(lr.Len()) - } + httpReq.ContentLength = contentLength return &Request{body, httpReq}, nil } @@ -105,7 +204,18 @@ type ResponseLogHook func(*log.Logger, *http.Response) // Client will close any response body when retrying, but if the retry is // aborted it is up to the CheckResponse callback to properly close any // response body before returning. -type CheckRetry func(resp *http.Response, err error) (bool, error) +type CheckRetry func(ctx context.Context, resp *http.Response, err error) (bool, error) + +// Backoff specifies a policy for how long to wait between retries. +// It is called after a failing request to determine the amount of time +// that should pass before trying again. 
+type Backoff func(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration + +// ErrorHandler is called if retries are expired, containing the last status +// from the http library. If not specified, default behavior for the library is +// to close the body and return an error indicating how many tries were +// attempted. If overriding this, be sure to close the body if needed. +type ErrorHandler func(resp *http.Response, err error, numTries int) (*http.Response, error) // Client is used to make HTTP requests. It adds additional functionality // like automatic retries to tolerate minor outages. @@ -128,6 +238,12 @@ type Client struct { // CheckRetry specifies the policy for handling retries, and is called // after each request. The default policy is DefaultRetryPolicy. CheckRetry CheckRetry + + // Backoff specifies the policy for how long to wait between retries + Backoff Backoff + + // ErrorHandler specifies the custom error handler to use, if any + ErrorHandler ErrorHandler } // NewClient creates a new Client with default settings. @@ -139,12 +255,18 @@ func NewClient() *Client { RetryWaitMax: defaultRetryWaitMax, RetryMax: defaultRetryMax, CheckRetry: DefaultRetryPolicy, + Backoff: DefaultBackoff, } } // DefaultRetryPolicy provides a default callback for Client.CheckRetry, which // will retry on connection errors and server errors. -func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { +func DefaultRetryPolicy(ctx context.Context, resp *http.Response, err error) (bool, error) { + // do not retry on context.Canceled or context.DeadlineExceeded + if ctx.Err() != nil { + return false, ctx.Err() + } + if err != nil { return true, err } @@ -152,24 +274,92 @@ func DefaultRetryPolicy(resp *http.Response, err error) (bool, error) { // the server time to recover, as 500's are typically not permanent // errors and may relate to outages on the server side. This will catch // invalid response codes as well, like 0 and 999. 
- if resp.StatusCode == 0 || resp.StatusCode >= 500 { + if resp.StatusCode == 0 || (resp.StatusCode >= 500 && resp.StatusCode != 501) { return true, nil } return false, nil } +// DefaultBackoff provides a default callback for Client.Backoff which +// will perform exponential backoff based on the attempt number and limited +// by the provided minimum and maximum durations. +func DefaultBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + mult := math.Pow(2, float64(attemptNum)) * float64(min) + sleep := time.Duration(mult) + if float64(sleep) != mult || sleep > max { + sleep = max + } + return sleep +} + +// LinearJitterBackoff provides a callback for Client.Backoff which will +// perform linear backoff based on the attempt number and with jitter to +// prevent a thundering herd. +// +// min and max here are *not* absolute values. The number to be multipled by +// the attempt number will be chosen at random from between them, thus they are +// bounding the jitter. +// +// For instance: +// * To get strictly linear backoff of one second increasing each retry, set +// both to one second (1s, 2s, 3s, 4s, ...) +// * To get a small amount of jitter centered around one second increasing each +// retry, set to around one second, such as a min of 800ms and max of 1200ms +// (892ms, 2102ms, 2945ms, 4312ms, ...) +// * To get extreme jitter, set to a very wide spread, such as a min of 100ms +// and a max of 20s (15382ms, 292ms, 51321ms, 35234ms, ...) 
+func LinearJitterBackoff(min, max time.Duration, attemptNum int, resp *http.Response) time.Duration { + // attemptNum always starts at zero but we want to start at 1 for multiplication + attemptNum++ + + if max <= min { + // Unclear what to do here, or they are the same, so return min * + // attemptNum + return min * time.Duration(attemptNum) + } + + // Seed rand; doing this every time is fine + rand := rand.New(rand.NewSource(int64(time.Now().Nanosecond()))) + + // Pick a random number that lies somewhere between the min and max and + // multiply by the attemptNum. attemptNum starts at zero so we always + // increment here. We first get a random percentage, then apply that to the + // difference between min and max, and add to min. + jitter := rand.Float64() * float64(max-min) + jitterMin := int64(jitter) + int64(min) + return time.Duration(jitterMin * int64(attemptNum)) +} + +// PassthroughErrorHandler is an ErrorHandler that directly passes through the +// values from the net/http library for the final request. The body is not +// closed. +func PassthroughErrorHandler(resp *http.Response, err error, _ int) (*http.Response, error) { + return resp, err +} + // Do wraps calling an HTTP method with retries. func (c *Client) Do(req *Request) (*http.Response, error) { - c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) + if c.Logger != nil { + c.Logger.Printf("[DEBUG] %s %s", req.Method, req.URL) + } + + var resp *http.Response + var err error for i := 0; ; i++ { var code int // HTTP response code // Always rewind the request body when non-nil. 
if req.body != nil { - if _, err := req.body.Seek(0, 0); err != nil { - return nil, fmt.Errorf("failed to seek body: %v", err) + body, err := req.body() + if err != nil { + return resp, err + } + if c, ok := body.(io.ReadCloser); ok { + req.Request.Body = c + } else { + req.Request.Body = ioutil.NopCloser(body) } } @@ -178,13 +368,18 @@ func (c *Client) Do(req *Request) (*http.Response, error) { } // Attempt the request - resp, err := c.HTTPClient.Do(req.Request) + resp, err = c.HTTPClient.Do(req.Request) + if resp != nil { + code = resp.StatusCode + } // Check if we should continue with retries. - checkOK, checkErr := c.CheckRetry(resp, err) + checkOK, checkErr := c.CheckRetry(req.Request.Context(), resp, err) if err != nil { - c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + if c.Logger != nil { + c.Logger.Printf("[ERR] %s %s request failed: %v", req.Method, req.URL, err) + } } else { // Call this here to maintain the behavior of logging all requests, // even if CheckRetry signals to stop. @@ -202,25 +397,38 @@ func (c *Client) Do(req *Request) (*http.Response, error) { return resp, err } + // We do this before drainBody beause there's no need for the I/O if + // we're breaking out + remain := c.RetryMax - i + if remain <= 0 { + break + } + // We're going to retry, consume any response to reuse the connection. 
- if err == nil { + if err == nil && resp != nil { c.drainBody(resp.Body) } - remain := c.RetryMax - i - if remain == 0 { - break - } - wait := backoff(c.RetryWaitMin, c.RetryWaitMax, i) + wait := c.Backoff(c.RetryWaitMin, c.RetryWaitMax, i, resp) desc := fmt.Sprintf("%s %s", req.Method, req.URL) if code > 0 { desc = fmt.Sprintf("%s (status: %d)", desc, code) } - c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + if c.Logger != nil { + c.Logger.Printf("[DEBUG] %s: retrying in %s (%d left)", desc, wait, remain) + } time.Sleep(wait) } - // Return an error if we fall out of the retry loop + if c.ErrorHandler != nil { + return c.ErrorHandler(resp, err, c.RetryMax+1) + } + + // By default, we close the response body and return an error without + // returning the response + if resp != nil { + resp.Body.Close() + } return nil, fmt.Errorf("%s %s giving up after %d attempts", req.Method, req.URL, c.RetryMax+1) } @@ -230,7 +438,9 @@ func (c *Client) drainBody(body io.ReadCloser) { defer body.Close() _, err := io.Copy(ioutil.Discard, io.LimitReader(body, respReadLimit)) if err != nil { - c.Logger.Printf("[ERR] error reading response body: %v", err) + if c.Logger != nil { + c.Logger.Printf("[ERR] error reading response body: %v", err) + } } } @@ -263,12 +473,12 @@ func (c *Client) Head(url string) (*http.Response, error) { } // Post is a shortcut for doing a POST request without making a new client. -func Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { +func Post(url, bodyType string, body interface{}) (*http.Response, error) { return defaultClient.Post(url, bodyType, body) } // Post is a convenience method for doing simple POST requests. 
-func (c *Client) Post(url, bodyType string, body io.ReadSeeker) (*http.Response, error) { +func (c *Client) Post(url, bodyType string, body interface{}) (*http.Response, error) { req, err := NewRequest("POST", url, body) if err != nil { return nil, err @@ -288,15 +498,3 @@ func PostForm(url string, data url.Values) (*http.Response, error) { func (c *Client) PostForm(url string, data url.Values) (*http.Response, error) { return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) } - -// backoff is used to calculate how long to sleep before retrying -// after observing failures. It takes the minimum/maximum wait time and -// iteration, and returns the duration to wait. -func backoff(min, max time.Duration, iter int) time.Duration { - mult := math.Pow(2, float64(iter)) * float64(min) - sleep := time.Duration(mult) - if float64(sleep) != mult || sleep > max { - sleep = max - } - return sleep -} diff --git a/vendor/vendor.json b/vendor/vendor.json index c2463e23cc5c..a99e4e6ec667 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -162,7 +162,7 @@ {"path":"github.com/hashicorp/go-msgpack/codec","revision":"fa3f63826f7c23912c15263591e65d54d080b458"}, {"path":"github.com/hashicorp/go-multierror","revision":"d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"}, {"path":"github.com/hashicorp/go-plugin","checksumSHA1":"lbG9uwM7qJlTIBg+8mjCC88sCPc=","revision":"e8d22c780116115ae5624720c9af0c97afe4f551","revisionTime":"2018-03-31T00:25:53Z"}, - {"path":"github.com/hashicorp/go-retryablehttp","checksumSHA1":"ErJHGU6AVPZM9yoY/xV11TwSjQs=","revision":"6e85be8fee1dcaa02c0eaaac2df5a8fbecf94145","revisionTime":"2016-09-30T03:51:02Z"}, + {"path":"github.com/hashicorp/go-retryablehttp","checksumSHA1":"/yKfFSspjuDzyOe/bBslrPzyfYM=","revision":"e651d75abec6fbd4f2c09508f72ae7af8a8b7171","revisionTime":"2018-07-18T19:50:05Z"}, 
{"path":"github.com/hashicorp/go-rootcerts","checksumSHA1":"A1PcINvF3UiwHRKn8UcgARgvGRs=","revision":"6bb64b370b90e7ef1fa532be9e591a81c3493e00","revisionTime":"2016-05-03T14:34:40Z"}, {"path":"github.com/hashicorp/go-safetemp","checksumSHA1":"CduvzBFfTv77nhjtXPGdIjQQLMI=","revision":"b1a1dbde6fdc11e3ae79efd9039009e22d4ae240","revisionTime":"2018-03-26T21:11:50Z"}, {"path":"github.com/hashicorp/go-sockaddr","checksumSHA1":"J47ySO1q0gcnmoMnir1q1loKzCk=","revision":"6d291a969b86c4b633730bfc6b8b9d64c3aafed9","revisionTime":"2018-03-20T11:50:54Z"}, From 56f9607c1aeb4d8443d0b86194dbb1659a720b73 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 10 Sep 2018 15:03:17 -0700 Subject: [PATCH 3/6] circonus --- .../circonus-gometrics/OPTIONS.md | 8 +- .../circonus-gometrics/README.md | 8 +- .../circonus-gometrics/api/api.go | 92 ++++- .../circonus-gometrics/api/check_bundle.go | 2 +- .../api/dashboard-example.json | 390 ------------------ .../circonus-gometrics/api/dashboard.go | 149 +++---- .../circonus-gometrics/api/doc.go | 2 +- .../circonus-gometrics/api/graph.go | 29 +- .../circonus-gometrics/api/metric.go | 2 +- .../circonus-gometrics/api/rule_set_group.go | 2 +- .../circonus-gometrics/api/worksheet.go | 16 +- .../circonus-gometrics/checkmgr/broker.go | 36 +- .../circonus-gometrics/checkmgr/cert.go | 19 +- .../circonus-gometrics/checkmgr/check.go | 42 +- .../circonus-gometrics/checkmgr/checkmgr.go | 134 ++++-- .../circonus-gometrics/checkmgr/metrics.go | 3 +- .../circonus-gometrics/circonus-gometrics.go | 159 +++++-- .../circonus-gometrics/counter.go | 15 + .../circonus-labs/circonus-gometrics/gauge.go | 94 +++-- .../circonus-gometrics/histogram.go | 24 ++ .../circonus-gometrics/submit.go | 44 +- .../circonus-labs/circonus-gometrics/tools.go | 1 - .../circonus-labs/circonus-gometrics/util.go | 21 +- vendor/vendor.json | 9 +- 24 files changed, 656 insertions(+), 645 deletions(-) delete mode 100644 
vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard-example.json diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md index 3926c3e638ca..f54c9984e7a3 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md +++ b/vendor/github.com/circonus-labs/circonus-gometrics/OPTIONS.md @@ -32,6 +32,8 @@ func main() { cfg.CheckManager.API.TokenKey = "" cfg.CheckManager.API.TokenApp = "circonus-gometrics" cfg.CheckManager.API.TokenURL = "https://api.circonus.com/v2" + cfg.CheckManager.API.CACert = nil + cfg.CheckManager.API.TLSConfig = nil // Check _, an := path.Split(os.Args[0]) @@ -51,6 +53,7 @@ func main() { cfg.CheckManager.Broker.ID = "" cfg.CheckManager.Broker.SelectTag = "" cfg.CheckManager.Broker.MaxResponseTime = "500ms" + cfg.CheckManager.Broker.TLSConfig = nil // create a new cgm instance and start sending metrics... // see the complete example in the main README. @@ -72,9 +75,11 @@ func main() { | `cfg.CheckManager.API.TokenKey` | "" | [Circonus API Token key](https://login.circonus.com/user/tokens) | | `cfg.CheckManager.API.TokenApp` | "circonus-gometrics" | App associated with API token | | `cfg.CheckManager.API.URL` | "https://api.circonus.com/v2" | Circonus API URL | +| `cfg.CheckManager.API.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus API | +| `cfg.CheckManager.API.CACert` | nil | DEPRECATED - use TLSConfig ~~[*x509.CertPool](https://golang.org/pkg/crypto/x509/#CertPool) with CA Cert to validate API endpoint using internal CA or self-signed certificates~~ | |Check|| | `cfg.CheckManager.Check.ID` | "" | Check ID of previously created check. (*Note: **check id** not **check bundle id**.*) | -| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. | +| `cfg.CheckManager.Check.SubmissionURL` | "" | Submission URL of previously created check. 
Metrics can also be sent to a local [circonus-agent](https://github.com/circonus-labs/circonus-agent) by using the agent's URL (e.g. `http://127.0.0.1:2609/write/appid` where `appid` is a unique identifier for the application which will prefix all metrics. Additionally, the circonus-agent can optionally listen for requests to `/write` on a unix socket - to leverage this feature, use a URL such as `http+unix:///path/to/socket_file/write/appid`). | | `cfg.CheckManager.Check.InstanceID` | hostname:program name | An identifier for the 'group of metrics emitted by this process or service'. | | `cfg.CheckManager.Check.TargetHost` | InstanceID | Explicit setting of `check.target`. | | `cfg.CheckManager.Check.DisplayName` | InstanceID | Custom `check.display_name`. Shows in UI check list. | @@ -87,6 +92,7 @@ func main() { | `cfg.CheckManager.Broker.ID` | "" | ID of a specific broker to use when creating a check. Default is to use a random enterprise broker or the public Circonus default broker. | | `cfg.CheckManager.Broker.SelectTag` | "" | Used to select a broker with the same tag(s). If more than one broker has the tag(s), one will be selected randomly from the resulting list. (e.g. could be used to select one from a list of brokers serving a specific colo/region. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west") | | `cfg.CheckManager.Broker.MaxResponseTime` | "500ms" | Maximum amount time to wait for a broker connection test to be considered valid. (if latency is > the broker will be considered invalid and not available for selection.) 
| +| `cfg.CheckManager.Broker.TLSConfig` | nil | Custom tls.Config to use when communicating with Circonus Broker | ## Notes: diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/README.md b/vendor/github.com/circonus-labs/circonus-gometrics/README.md index a6291ef1ed98..323f97c02a17 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/README.md +++ b/vendor/github.com/circonus-labs/circonus-gometrics/README.md @@ -35,7 +35,7 @@ func main() { logger.Println("Configuring cgm") cmc := &cgm.Config{} - cmc.Debug := false // set to true for debug messages + cmc.Debug = false // set to true for debug messages cmc.Log = logger // Circonus API Token key (https://login.circonus.com/user/tokens) @@ -122,9 +122,10 @@ func main() { cmc.CheckManager.API.TokenKey = os.Getenv("CIRCONUS_API_TOKEN") cmc.CheckManager.API.TokenApp = os.Getenv("CIRCONUS_API_APP") cmc.CheckManager.API.URL = os.Getenv("CIRCONUS_API_URL") + cmc.CheckManager.API.TLSConfig = nil // Check configuration options - cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL") + cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISSION_URL") cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID") cmc.CheckManager.Check.InstanceID = "" cmc.CheckManager.Check.DisplayName = "" @@ -142,6 +143,7 @@ func main() { cmc.CheckManager.Broker.ID = "" cmc.CheckManager.Broker.SelectTag = "" cmc.CheckManager.Broker.MaxResponseTime = "500ms" + cmc.CheckManager.Broker.TLSConfig = nil logger.Println("Creating new cgm instance") @@ -230,3 +232,5 @@ func main() { ``` Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file. 
+ +[![codecov](https://codecov.io/gh/maier/circonus-gometrics/branch/master/graph/badge.svg)](https://codecov.io/gh/maier/circonus-gometrics) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go index 73480e4c4281..ee6a411c97a5 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/api.go @@ -6,7 +6,10 @@ package api import ( "bytes" + "context" crand "crypto/rand" + "crypto/tls" + "crypto/x509" "errors" "fmt" "io/ioutil" @@ -14,10 +17,10 @@ import ( "math" "math/big" "math/rand" + "net" "net/http" "net/url" "os" - "regexp" "strings" "sync" "time" @@ -49,6 +52,9 @@ type TokenKeyType string // TokenAppType - Circonus API Token app name type TokenAppType string +// TokenAccountIDType - Circonus API Token account id +type TokenAccountIDType string + // CIDType Circonus object cid type CIDType *string @@ -69,11 +75,25 @@ type TagType []string // Config options for Circonus API type Config struct { - URL string + // URL defines the API URL - default https://api.circonus.com/v2/ + URL string + + // TokenKey defines the key to use when communicating with the API TokenKey string + + // TokenApp defines the app to use when communicating with the API TokenApp string - Log *log.Logger - Debug bool + + TokenAccountID string + + // CACert deprecating, use TLSConfig instead + CACert *x509.CertPool + + // TLSConfig defines a custom tls configuration to use when communicating with the API + TLSConfig *tls.Config + + Log *log.Logger + Debug bool } // API Circonus API @@ -81,6 +101,9 @@ type API struct { apiURL *url.URL key TokenKeyType app TokenAppType + accountID TokenAccountIDType + caCert *x509.CertPool + tlsConfig *tls.Config Debug bool Log *log.Logger useExponentialBackoff bool @@ -114,6 +137,8 @@ func New(ac *Config) (*API, error) { app = defaultAPIApp } + acctID := TokenAccountIDType(ac.TokenAccountID) + au := 
string(ac.URL) if au == "" { au = defaultAPIURL @@ -132,11 +157,14 @@ func New(ac *Config) (*API, error) { } a := &API{ - apiURL: apiURL, - key: key, - app: app, - Debug: ac.Debug, - Log: ac.Log, + apiURL: apiURL, + key: key, + app: app, + accountID: acctID, + caCert: ac.CACert, + tlsConfig: ac.TLSConfig, + Debug: ac.Debug, + Log: ac.Log, useExponentialBackoff: false, } @@ -213,7 +241,7 @@ func (a *API) apiRequest(reqMethod string, reqPath string, data []byte) ([]byte, if !a.useExponentialBackoff { break } - if matched, _ := regexp.MatchString("code 403", err.Error()); matched { + if strings.Contains(err.Error(), "code 403") { break } } @@ -245,14 +273,18 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er reqURL += "/" } if len(reqPath) >= 3 && reqPath[:3] == "/v2" { - reqURL += reqPath[3:len(reqPath)] + reqURL += reqPath[3:] } else { reqURL += reqPath } // keep last HTTP error in the event of retry failure var lastHTTPError error - retryPolicy := func(resp *http.Response, err error) (bool, error) { + retryPolicy := func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctxErr := ctx.Err(); ctxErr != nil { + return false, ctxErr + } + if err != nil { lastHTTPError = err return true, err @@ -285,8 +317,44 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er req.Header.Add("Accept", "application/json") req.Header.Add("X-Circonus-Auth-Token", string(a.key)) req.Header.Add("X-Circonus-App-Name", string(a.app)) + if string(a.accountID) != "" { + req.Header.Add("X-Circonus-Account-ID", string(a.accountID)) + } client := retryablehttp.NewClient() + if a.apiURL.Scheme == "https" { + var tlscfg *tls.Config + if a.tlsConfig != nil { // preference full custom tls config + tlscfg = a.tlsConfig + } else if a.caCert != nil { + tlscfg = &tls.Config{RootCAs: a.caCert} + } + client.HTTPClient.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * 
time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlscfg, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + DisableCompression: true, + } + } else { + client.HTTPClient.Transport = &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + TLSHandshakeTimeout: 10 * time.Second, + DisableKeepAlives: true, + MaxIdleConnsPerHost: -1, + DisableCompression: true, + } + } + a.useExponentialBackoffmu.Lock() eb := a.useExponentialBackoff a.useExponentialBackoffmu.Unlock() diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go index 8ab851e0ca68..c202853c2ed0 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go @@ -38,7 +38,7 @@ type CheckBundle struct { Checks []string `json:"_checks,omitempty"` // [] len >= 0 CheckUUIDs []string `json:"_check_uuids,omitempty"` // [] len >= 0 CID string `json:"_cid,omitempty"` // string - Config CheckBundleConfig `json:"config,omitempty"` // NOTE contents of config are check type specific, map len >= 0 + Config CheckBundleConfig `json:"config"` // NOTE contents of config are check type specific, map len >= 0 Created uint `json:"_created,omitempty"` // uint DisplayName string `json:"display_name"` // string LastModifedBy string `json:"_last_modifed_by,omitempty"` // string diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard-example.json b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard-example.json deleted file mode 100644 index 627639e74067..000000000000 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard-example.json +++ /dev/null @@ -1,390 +0,0 @@ -{ - "_active": true, - "_cid": "/dashboard/1234", - "_created": 
1483193930, - "_created_by": "/user/1234", - "_dashboard_uuid": "01234567-89ab-cdef-0123-456789abcdef", - "_last_modified": 1483450351, - "account_default": false, - "grid_layout": { - "height": 4, - "width": 4 - }, - "options": { - "access_configs": [ - ], - "fullscreen_hide_title": false, - "hide_grid": false, - "linkages": [ - ], - "scale_text": true, - "text_size": 16 - }, - "shared": false, - "title": "foo bar baz", - "widgets": [ - { - "active": true, - "height": 1, - "name": "Cluster", - "origin": "d0", - "settings": { - "account_id": "1234", - "algorithm": "cor", - "cluster_id": 1234, - "cluster_name": "test", - "layout": "compact", - "size": "medium", - "threshold": 0.7 - }, - "type": "cluster", - "widget_id": "w4", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "HTML", - "origin": "d1", - "settings": { - "markup": "

foo

", - "title": "html" - }, - "type": "html", - "widget_id": "w9", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Chart", - "origin": "c0", - "settings": { - "chart_type": "bar", - "datapoints": [ - { - "_check_id": 1234, - "_metric_type": "numeric", - "account_id": "1234", - "label": "Used", - "metric": "01234567-89ab-cdef-0123-456789abcdef:vm`memory`used" - }, - { - "_check_id": 1234, - "_metric_type": "numeric", - "account_id": "1234", - "label": "Free", - "metric": "01234567-89ab-cdef-0123-456789abcdef:vm`memory`free" - } - ], - "definition": { - "datasource": "realtime", - "derive": "gauge", - "disable_autoformat": false, - "formula": "", - "legend": { - "show": false, - "type": "html" - }, - "period": 0, - "pop_onhover": false, - "wedge_labels": { - "on_chart": true, - "tooltips": false - }, - "wedge_values": { - "angle": "0", - "color": "background", - "show": true - } - }, - "title": "chart graph" - }, - "type": "chart", - "widget_id": "w5", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Alerts", - "origin": "a0", - "settings": { - "account_id": "1234", - "acknowledged": "all", - "cleared": "all", - "contact_groups": [ - ], - "dependents": "all", - "display": "list", - "maintenance": "all", - "min_age": "0", - "off_hours": [ - 17, - 9 - ], - "search": "", - "severity": "12345", - "tag_filter_set": [ - ], - "time_window": "30M", - "title": "alerts", - "week_days": [ - "sun", - "mon", - "tue", - "wed", - "thu", - "fri", - "sat" - ] - }, - "type": "alerts", - "widget_id": "w2", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Graph", - "origin": "c1", - "settings": { - "_graph_title": "foo bar / %Used", - "account_id": "1234", - "date_window": "2w", - "graph_id": "01234567-89ab-cdef-0123-456789abcdef", - "hide_xaxis": false, - "hide_yaxis": false, - "key_inline": false, - "key_loc": "noop", - "key_size": "1", - "key_wrap": false, - "label": "", - "overlay_set_id": "", - "period": "2000", - 
"previous_graph_id": "null", - "realtime": false, - "show_flags": false - }, - "type": "graph", - "widget_id": "w8", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "List", - "origin": "a2", - "settings": { - "account_id": "1234", - "limit": "10", - "search": "", - "type": "graph" - }, - "type": "list", - "widget_id": "w10", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Status", - "origin": "b2", - "settings": { - "account_id": "1234", - "agent_status_settings": { - "search": "", - "show_agent_types": "both", - "show_contact": false, - "show_feeds": true, - "show_setup": false, - "show_skew": true, - "show_updates": true - }, - "content_type": "agent_status", - "host_status_settings": { - "layout_style": "grid", - "search": "", - "sort_by": "alerts", - "tag_filter_set": [ - ] - } - }, - "type": "status", - "widget_id": "w11", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Text", - "origin": "d2", - "settings": { - "autoformat": false, - "body_format": "

{metric_name} ({value_type})
{metric_value}
{value_date}

", - "datapoints": [ - { - "_cluster_title": "test", - "_label": "Cluster: test", - "account_id": "1234", - "cluster_id": 1234, - "numeric_only": false - } - ], - "period": 0, - "title_format": "Metric Status", - "use_default": true, - "value_type": "gauge" - }, - "type": "text", - "widget_id": "w13", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Chart", - "origin": "b0", - "settings": { - "chart_type": "bar", - "datapoints": [ - { - "_cluster_title": "test", - "_label": "Cluster: test", - "account_id": "1234", - "cluster_id": 1234, - "numeric_only": true - } - ], - "definition": { - "datasource": "realtime", - "derive": "gauge", - "disable_autoformat": false, - "formula": "", - "legend": { - "show": false, - "type": "html" - }, - "period": 0, - "pop_onhover": false, - "wedge_labels": { - "on_chart": true, - "tooltips": false - }, - "wedge_values": { - "angle": "0", - "color": "background", - "show": true - } - }, - "title": "chart metric cluster" - }, - "type": "chart", - "widget_id": "w3", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Gauge", - "origin": "b1", - "settings": { - "_check_id": 1234, - "account_id": "1234", - "check_uuid": "01234567-89ab-cdef-0123-456789abcdef", - "disable_autoformat": false, - "formula": "", - "metric_display_name": "%Used", - "metric_name": "fs`/foo`df_used_percent", - "period": 0, - "range_high": 100, - "range_low": 0, - "thresholds": { - "colors": [ - "#008000", - "#ffcc00", - "#ee0000" - ], - "flip": false, - "values": [ - "75%", - "87.5%" - ] - }, - "title": "Metric Gauge", - "type": "bar", - "value_type": "gauge" - }, - "type": "gauge", - "widget_id": "w7", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Text", - "origin": "c2", - "settings": { - "autoformat": false, - "body_format": "

{metric_name} ({value_type})
{metric_value}
{value_date}

", - "datapoints": [ - { - "_check_id": 1234, - "_metric_type": "numeric", - "account_id": "1234", - "label": "cache entries", - "metric": "01234567-89ab-cdef-0123-456789abcdef:foo`cache_entries" - }, - { - "_check_id": 1234, - "_metric_type": "numeric", - "account_id": "1234", - "label": "cache capacity", - "metric": "01234567-89ab-cdef-0123-456789abcdef:foo`cache_capacity" - }, - { - "_check_id": 1234, - "_metric_type": "numeric", - "account_id": "1234", - "label": "cache size", - "metric": "01234567-89ab-cdef-0123-456789abcdef:foo`cache_size" - } - ], - "period": 0, - "title_format": "Metric Status", - "use_default": true, - "value_type": "gauge" - }, - "type": "text", - "widget_id": "w12", - "width": 1 - }, - { - "active": true, - "height": 1, - "name": "Forecast", - "origin": "a1", - "settings": { - "format": "standard", - "resource_limit": "0", - "resource_usage": "metric:average(\"01234567-89ab-cdef-0123-456789abcdef\",p\"fs%60/foo%60df_used_percent\")", - "thresholds": { - "colors": [ - "#008000", - "#ffcc00", - "#ee0000" - ], - "values": [ - "1d", - "1h" - ] - }, - "title": "Resource Forecast", - "trend": "auto" - }, - "type": "forecast", - "widget_id": "w6", - "width": 1 - } - ] -} diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go index 5bca0a3cde1f..596f33db6fb5 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go @@ -24,24 +24,24 @@ type DashboardGridLayout struct { // DashboardAccessConfig defines access config type DashboardAccessConfig struct { - BlackDash bool `json:"black_dash,omitempty"` - Enabled bool `json:"enabled,omitempty"` - Fullscreen bool `json:"fullscreen,omitempty"` - FullscreenHideTitle bool `json:"fullscreen_hide_title,omitempty"` - Nickname string `json:"nickname,omitempty"` - ScaleText bool `json:"scale_text,omitempty"` - SharedID 
string `json:"shared_id,omitempty"` - TextSize uint `json:"text_size,omitempty"` + BlackDash bool `json:"black_dash"` + Enabled bool `json:"enabled"` + Fullscreen bool `json:"fullscreen"` + FullscreenHideTitle bool `json:"fullscreen_hide_title"` + Nickname string `json:"nickname"` + ScaleText bool `json:"scale_text"` + SharedID string `json:"shared_id"` + TextSize uint `json:"text_size"` } // DashboardOptions defines options type DashboardOptions struct { - AccessConfigs []DashboardAccessConfig `json:"access_configs,omitempty"` - FullscreenHideTitle bool `json:"fullscreen_hide_title,omitempty"` - HideGrid bool `json:"hide_grid,omitempty"` - Linkages [][]string `json:"linkages,omitempty"` - ScaleText bool `json:"scale_text,omitempty"` - TextSize uint `json:"text_size,omitempty"` + AccessConfigs []DashboardAccessConfig `json:"access_configs"` + FullscreenHideTitle bool `json:"fullscreen_hide_title"` + HideGrid bool `json:"hide_grid"` + Linkages [][]string `json:"linkages"` + ScaleText bool `json:"scale_text"` + TextSize uint `json:"text_size"` } // ChartTextWidgetDatapoint defines datapoints for charts @@ -116,67 +116,68 @@ type StatusWidgetHostStatusSettings struct { } // DashboardWidgetSettings defines settings specific to widget +// Note: optional attributes which are structs need to be pointers so they will be omitted type DashboardWidgetSettings struct { - AccountID string `json:"account_id,omitempty"` // alerts, clusters, gauges, graphs, lists, status - Acknowledged string `json:"acknowledged,omitempty"` // alerts - AgentStatusSettings StatusWidgetAgentStatusSettings `json:"agent_status_settings,omitempty"` // status - Algorithm string `json:"algorithm,omitempty"` // clusters - Autoformat bool `json:"autoformat,omitempty"` // text - BodyFormat string `json:"body_format,omitempty"` // text - ChartType string `json:"chart_type,omitempty"` // charts - CheckUUID string `json:"check_uuid,omitempty"` // gauges - Cleared string `json:"cleared,omitempty"` // alerts - 
ClusterID uint `json:"cluster_id,omitempty"` // clusters - ClusterName string `json:"cluster_name,omitempty"` // clusters - ContactGroups []uint `json:"contact_groups,omitempty"` // alerts - ContentType string `json:"content_type,omitempty"` // status - Datapoints []ChartTextWidgetDatapoint `json:"datapoints,omitempty"` // charts, text - DateWindow string `json:"date_window,omitempty"` // graphs - Definition ChartWidgtDefinition `json:"definition,omitempty"` // charts - Dependents string `json:"dependents,omitempty"` // alerts - DisableAutoformat bool `json:"disable_autoformat,omitempty"` // gauges - Display string `json:"display,omitempty"` // alerts - Format string `json:"format,omitempty"` // forecasts - Formula string `json:"formula,omitempty"` // gauges - GraphUUID string `json:"graph_id,omitempty"` // graphs - HideXAxis bool `json:"hide_xaxis,omitempty"` // graphs - HideYAxis bool `json:"hide_yaxis,omitempty"` // graphs - HostStatusSettings StatusWidgetHostStatusSettings `json:"host_status_settings,omitempty"` // status - KeyInline bool `json:"key_inline,omitempty"` // graphs - KeyLoc string `json:"key_loc,omitempty"` // graphs - KeySize string `json:"key_size,omitempty"` // graphs - KeyWrap bool `json:"key_wrap,omitempty"` // graphs - Label string `json:"label,omitempty"` // graphs - Layout string `json:"layout,omitempty"` // clusters - Limit string `json:"limit,omitempty"` // lists - Maintenance string `json:"maintenance,omitempty"` // alerts - Markup string `json:"markup,omitempty"` // html - MetricDisplayName string `json:"metric_display_name,omitempty"` // gauges - MetricName string `json:"metric_name,omitempty"` // gauges - MinAge string `json:"min_age,omitempty"` // alerts - OffHours []uint `json:"off_hours,omitempty"` // alerts - OverlaySetID string `json:"overlay_set_id,omitempty"` // graphs - Period interface{} `json:"period,omitempty"` // BUG type switching between widgets (doc: string; gauges, text: uint; graphs: string) - RangeHigh int 
`json:"range_high,omitempty"` // gauges - RangeLow int `json:"range_low,omitempty"` // gauges - Realtime bool `json:"realtime,omitempty"` // graphs - ResourceLimit string `json:"resource_limit,omitempty"` // forecasts - ResourceUsage string `json:"resource_usage,omitempty"` // forecasts - Search string `json:"search,omitempty"` // alerts, lists - Severity string `json:"severity,omitempty"` // alerts - ShowFlags bool `json:"show_flags,omitempty"` // graphs - Size string `json:"size,omitempty"` // clusters - TagFilterSet []string `json:"tag_filter_set,omitempty"` // alerts - Threshold float32 `json:"threshold,omitempty"` // clusters - Thresholds ForecastGaugeWidgetThresholds `json:"thresholds,omitempty"` // forecasts, gauges - TimeWindow string `json:"time_window,omitempty"` // alerts - Title string `json:"title,omitempty"` // alerts, charts, forecasts, gauges, html - TitleFormat string `json:"title_format,omitempty"` // text - Trend string `json:"trend,omitempty"` // forecasts - Type string `json:"type,omitempty"` // gauges, lists - UseDefault bool `json:"use_default,omitempty"` // text - ValueType string `json:"value_type,omitempty"` // gauges, text - WeekDays []string `json:"weekdays,omitempty"` // alerts + AccountID string `json:"account_id,omitempty"` // alerts, clusters, gauges, graphs, lists, status + Acknowledged string `json:"acknowledged,omitempty"` // alerts + AgentStatusSettings *StatusWidgetAgentStatusSettings `json:"agent_status_settings,omitempty"` // status + Algorithm string `json:"algorithm,omitempty"` // clusters + Autoformat bool `json:"autoformat,omitempty"` // text + BodyFormat string `json:"body_format,omitempty"` // text + ChartType string `json:"chart_type,omitempty"` // charts + CheckUUID string `json:"check_uuid,omitempty"` // gauges + Cleared string `json:"cleared,omitempty"` // alerts + ClusterID uint `json:"cluster_id,omitempty"` // clusters + ClusterName string `json:"cluster_name,omitempty"` // clusters + ContactGroups []uint 
`json:"contact_groups,omitempty"` // alerts + ContentType string `json:"content_type,omitempty"` // status + Datapoints []ChartTextWidgetDatapoint `json:"datapoints,omitempty"` // charts, text + DateWindow string `json:"date_window,omitempty"` // graphs + Definition *ChartWidgtDefinition `json:"definition,omitempty"` // charts + Dependents string `json:"dependents,omitempty"` // alerts + DisableAutoformat bool `json:"disable_autoformat,omitempty"` // gauges + Display string `json:"display,omitempty"` // alerts + Format string `json:"format,omitempty"` // forecasts + Formula string `json:"formula,omitempty"` // gauges + GraphUUID string `json:"graph_id,omitempty"` // graphs + HideXAxis bool `json:"hide_xaxis,omitempty"` // graphs + HideYAxis bool `json:"hide_yaxis,omitempty"` // graphs + HostStatusSettings *StatusWidgetHostStatusSettings `json:"host_status_settings,omitempty"` // status + KeyInline bool `json:"key_inline,omitempty"` // graphs + KeyLoc string `json:"key_loc,omitempty"` // graphs + KeySize uint `json:"key_size,omitempty"` // graphs + KeyWrap bool `json:"key_wrap,omitempty"` // graphs + Label string `json:"label,omitempty"` // graphs + Layout string `json:"layout,omitempty"` // clusters + Limit uint `json:"limit,omitempty"` // lists + Maintenance string `json:"maintenance,omitempty"` // alerts + Markup string `json:"markup,omitempty"` // html + MetricDisplayName string `json:"metric_display_name,omitempty"` // gauges + MetricName string `json:"metric_name,omitempty"` // gauges + MinAge string `json:"min_age,omitempty"` // alerts + OffHours []uint `json:"off_hours,omitempty"` // alerts + OverlaySetID string `json:"overlay_set_id,omitempty"` // graphs + Period uint `json:"period,omitempty"` // gauges, text, graphs + RangeHigh int `json:"range_high,omitempty"` // gauges + RangeLow int `json:"range_low,omitempty"` // gauges + Realtime bool `json:"realtime,omitempty"` // graphs + ResourceLimit string `json:"resource_limit,omitempty"` // forecasts + 
ResourceUsage string `json:"resource_usage,omitempty"` // forecasts + Search string `json:"search,omitempty"` // alerts, lists + Severity string `json:"severity,omitempty"` // alerts + ShowFlags bool `json:"show_flags,omitempty"` // graphs + Size string `json:"size,omitempty"` // clusters + TagFilterSet []string `json:"tag_filter_set,omitempty"` // alerts + Threshold float32 `json:"threshold,omitempty"` // clusters + Thresholds *ForecastGaugeWidgetThresholds `json:"thresholds,omitempty"` // forecasts, gauges + TimeWindow string `json:"time_window,omitempty"` // alerts + Title string `json:"title,omitempty"` // alerts, charts, forecasts, gauges, html + TitleFormat string `json:"title_format,omitempty"` // text + Trend string `json:"trend,omitempty"` // forecasts + Type string `json:"type,omitempty"` // gauges, lists + UseDefault bool `json:"use_default,omitempty"` // text + ValueType string `json:"value_type,omitempty"` // gauges, text + WeekDays []string `json:"weekdays,omitempty"` // alerts } // DashboardWidget defines widget diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go index 63904d7844b5..bdceae5d0427 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go @@ -56,7 +56,7 @@ Verbs Delete remove an item - e.g. DeleteAnnotation, DeleteAnnotationByCID Search search for item(s) - e.g. SearchAnnotations New new item config - e.g. NewAnnotation (returns an empty item, - any applicable defautls defined) + any applicable defaults defined) Not all endpoints support all verbs. 
*/ diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go index ab98e975c5ec..643eff4bba6b 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go @@ -37,13 +37,13 @@ type GraphAccessKey struct { // GraphComposite defines a composite type GraphComposite struct { - Axis string `json:"axis,omitempty"` // string - Color string `json:"color,omitempty"` // string - DataFormula *string `json:"data_formula,omitempty"` // string or null - Hidden bool `json:"hidden,omitempty"` // boolean - LegendFormula *string `json:"legend_formula,omitempty"` // string or null - Name string `json:"name,omitempty"` // string - Stack *uint `json:"stack,omitempty"` // uint or null + Axis string `json:"axis"` // string + Color string `json:"color"` // string + DataFormula *string `json:"data_formula"` // string or null + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + Name string `json:"name"` // string + Stack *uint `json:"stack"` // uint or null } // GraphDatapoint defines a datapoint @@ -65,17 +65,18 @@ type GraphDatapoint struct { // GraphGuide defines a guide type GraphGuide struct { - Color string `json:"color,omitempty"` // string - DataFormula *string `json:"data_formula,omitempty"` // string or null - Hidden bool `json:"hidden,omitempty"` // boolean - LegendFormula *string `json:"legend_formula,omitempty"` // string or null - Name string `json:"name,omitempty"` // string + Color string `json:"color"` // string + DataFormula *string `json:"data_formula"` // string or null + Hidden bool `json:"hidden"` // boolean + LegendFormula *string `json:"legend_formula"` // string or null + Name string `json:"name"` // string } // GraphMetricCluster defines a metric cluster type GraphMetricCluster struct { AggregateFunc string 
`json:"aggregate_function,omitempty"` // string Axis string `json:"axis,omitempty"` // string + Color *string `json:"color,omitempty"` // string DataFormula *string `json:"data_formula"` // string or null Hidden bool `json:"hidden"` // boolean LegendFormula *string `json:"legend_formula"` // string or null @@ -142,7 +143,7 @@ type Graph struct { Datapoints []GraphDatapoint `json:"datapoints,omitempt"` // [] len >= 0 Description string `json:"description,omitempty"` // string Guides []GraphGuide `json:"guides,omitempty"` // [] len >= 0 - LineStyle string `json:"line_style,omitempty"` // string + LineStyle *string `json:"line_style"` // string or null LogLeftY *int `json:"logarithmic_left_y,string,omitempty"` // int encoded as string or null BUG doc: number (not string) LogRightY *int `json:"logarithmic_right_y,string,omitempty"` // int encoded as string or null BUG doc: number (not string) MaxLeftY *float64 `json:"max_left_y,string,omitempty"` // float64 encoded as string or null BUG doc: number (not string) @@ -152,7 +153,7 @@ type Graph struct { MinRightY *float64 `json:"min_right_y,string,omitempty"` // float64 encoded as string or null BUG doc: number (not string) Notes *string `json:"notes,omitempty"` // string or null OverlaySets *map[string]GraphOverlaySet `json:"overlay_sets,omitempty"` // GroupOverLaySets or null - Style string `json:"style,omitempty"` // string + Style *string `json:"style"` // string or null Tags []string `json:"tags,omitempty"` // [] len >= 0 Title string `json:"title,omitempty"` // string } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go index a77d41ce5bdb..3608b06ff900 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go @@ -26,7 +26,7 @@ type Metric struct { CheckTags []string `json:"_check_tags,omitempty"` // [] len >= 0 CheckUUID string 
`json:"_check_uuid,omitempty"` // string CID string `json:"_cid,omitempty"` // string - Histogram bool `json:"_histogram,omitempty"` // boolean + Histogram string `json:"_histogram,omitempty"` // string Link *string `json:"link,omitempty"` // string or null MetricName string `json:"_metric_name,omitempty"` // string MetricType string `json:"_metric_type,omitempty"` // string diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go index a15743061750..382c9221c66f 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go @@ -166,7 +166,7 @@ func (a *API) DeleteRuleSetGroup(cfg *RuleSetGroup) (bool, error) { return a.DeleteRuleSetGroupByCID(CIDType(&cfg.CID)) } -// DeleteRuleSetGroupByCID deletes rule set group wiht passed cid. +// DeleteRuleSetGroupByCID deletes rule set group with passed cid. func (a *API) DeleteRuleSetGroupByCID(cid CIDType) (bool, error) { if cid == nil || *cid == "" { return false, fmt.Errorf("Invalid rule set group CID [none]") diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go b/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go index 0dd5e9373408..5c33642030a6 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go @@ -30,14 +30,14 @@ type WorksheetSmartQuery struct { // Worksheet defines a worksheet. See https://login.circonus.com/resources/api/calls/worksheet for more information. 
type Worksheet struct { - CID string `json:"_cid,omitempty"` // string - Description *string `json:"description"` // string or null - Favorite bool `json:"favorite"` // boolean - Graphs []WorksheetGraph `json:"worksheets,omitempty"` // [] len >= 0 - Notes *string `json:"notes"` // string or null - SmartQueries []WorksheetSmartQuery `json:"smart_queries,omitempty"` // [] len >= 0 - Tags []string `json:"tags"` // [] len >= 0 - Title string `json:"title"` // string + CID string `json:"_cid,omitempty"` // string + Description *string `json:"description"` // string or null + Favorite bool `json:"favorite"` // boolean + Graphs []WorksheetGraph `json:"graphs"` // [] len >= 0 + Notes *string `json:"notes"` // string or null + SmartQueries []WorksheetSmartQuery `json:"smart_queries"` // [] len >= 0 + Tags []string `json:"tags"` // [] len >= 0 + Title string `json:"title"` // string } // NewWorksheet returns a new Worksheet (with defaults, if applicable) diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go index caeaaef33d78..221d8a247995 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go @@ -80,6 +80,7 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp func (cm *CheckManager) selectBroker() (*api.Broker, error) { var brokerList *[]api.Broker var err error + enterpriseType := "enterprise" if len(cm.brokerSelectTag) > 0 { filter := api.SearchFilterType{ @@ -104,9 +105,10 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { haveEnterprise := false for _, broker := range *brokerList { + broker := broker if cm.isValidBroker(&broker) { validBrokers[broker.CID] = broker - if broker.Type == "enterprise" { + if broker.Type == enterpriseType { haveEnterprise = true } } @@ -114,7 +116,7 @@ func (cm *CheckManager) selectBroker() 
(*api.Broker, error) { if haveEnterprise { // eliminate non-enterprise brokers from valid brokers for k, v := range validBrokers { - if v.Type != "enterprise" { + if v.Type != enterpriseType { delete(validBrokers, k) } } @@ -138,8 +140,20 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) { // Verify broker supports the check type to be used func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool { + baseType := string(checkType) + + for _, module := range details.Modules { + if module == baseType { + return true + } + } + + if idx := strings.Index(baseType, ":"); idx > 0 { + baseType = baseType[0:idx] + } + for _, module := range details.Modules { - if CheckTypeType(module) == checkType { + if module == baseType { return true } } @@ -152,8 +166,15 @@ func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { var brokerHost string var brokerPort string + + if broker.Type != "circonus" && broker.Type != "enterprise" { + return false + } + valid := false + for _, detail := range broker.Details { + detail := detail // broker must be active if detail.Status != statusActive { @@ -174,7 +195,7 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { if detail.ExternalPort != 0 { brokerPort = strconv.Itoa(int(detail.ExternalPort)) } else { - if *detail.Port != 0 { + if detail.Port != nil && *detail.Port != 0 { brokerPort = strconv.Itoa(int(*detail.Port)) } else { brokerPort = "43191" @@ -183,10 +204,15 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool { if detail.ExternalHost != nil && *detail.ExternalHost != "" { brokerHost = *detail.ExternalHost - } else { + } else if detail.IP != nil && *detail.IP != "" { brokerHost = *detail.IP } + if brokerHost == "" { + cm.Log.Printf("[WARN] Broker '%s' instance %s has no IP or external host set", broker.Name, detail.CN) + continue + } + if brokerHost == 
"trap.noit.circonus.net" && brokerPort != "443" { brokerPort = "443" } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go index 01f65917f83c..cbe3ba7068a4 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/cert.go @@ -42,17 +42,22 @@ type CACert struct { } // loadCACert loads the CA cert for the broker designated by the submission url -func (cm *CheckManager) loadCACert() { +func (cm *CheckManager) loadCACert() error { if cm.certPool != nil { - return + return nil } cm.certPool = x509.NewCertPool() - cert, err := cm.fetchCert() - if err != nil { - if cm.Debug { - cm.Log.Printf("[DEBUG] Unable to fetch ca.crt, using default. %+v\n", err) + var cert []byte + var err error + + if cm.enabled { + // only attempt to retrieve broker CA cert if + // the check is being managed. + cert, err = cm.fetchCert() + if err != nil { + return err } } @@ -61,6 +66,8 @@ func (cm *CheckManager) loadCACert() { } cm.certPool.AppendCertsFromPEM(cert) + + return nil } // fetchCert fetches CA certificate using Circonus API diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go index fd31c6d25607..2f0c9eb13f04 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go @@ -166,7 +166,7 @@ func (cm *CheckManager) initializeTrapURL() error { // new search (check.target != instanceid, instanceid encoded in notes field) searchCriteria := fmt.Sprintf( "(active:1)(type:\"%s\")(tags:%s)", cm.checkType, strings.Join(cm.checkSearchTag, ",")) - filterCriteria := map[string][]string{"f_notes": []string{*cm.getNotes()}} + filterCriteria := map[string][]string{"f_notes": {*cm.getNotes()}} checkBundle, err = 
cm.checkBundleSearch(searchCriteria, filterCriteria) if err != nil { return err @@ -243,6 +243,18 @@ func (cm *CheckManager) initializeTrapURL() error { } cm.trapCN = BrokerCNType(cn) + if cm.enabled { + u, err := url.Parse(string(cm.trapURL)) + if err != nil { + return err + } + if u.Scheme == "https" { + if err := cm.loadCACert(); err != nil { + return err + } + } + } + cm.trapLastUpdate = time.Now() return nil @@ -295,12 +307,9 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) return nil, nil, err } - config := &api.CheckBundle{ - Brokers: []string{broker.CID}, - Config: map[config.Key]string{ - config.AsyncMetrics: "true", - config.Secret: checkSecret, - }, + chkcfg := &api.CheckBundle{ + Brokers: []string{broker.CID}, + Config: make(map[config.Key]string), DisplayName: string(cm.checkDisplayName), Metrics: []api.CheckBundleMetric{}, MetricLimit: config.DefaultCheckBundleMetricLimit, @@ -313,7 +322,24 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error) Type: string(cm.checkType), } - checkBundle, err := cm.apih.CreateCheckBundle(config) + if len(cm.customConfigFields) > 0 { + for fld, val := range cm.customConfigFields { + chkcfg.Config[config.Key(fld)] = val + } + } + + // + // use the default config settings if these are NOT set by user configuration + // + if val, ok := chkcfg.Config[config.AsyncMetrics]; !ok || val == "" { + chkcfg.Config[config.AsyncMetrics] = "true" + } + + if val, ok := chkcfg.Config[config.Secret]; !ok || val == "" { + chkcfg.Config[config.Secret] = checkSecret + } + + checkBundle, err := cm.apih.CreateCheckBundle(chkcfg) if err != nil { return nil, nil, err } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go index f78da390f661..80b0c08e1813 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go +++ 
b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go @@ -2,25 +2,27 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package checkmgr provides a check management interace to circonus-gometrics +// Package checkmgr provides a check management interface to circonus-gometrics package checkmgr import ( "crypto/tls" "crypto/x509" - "errors" "fmt" "io/ioutil" "log" "net/url" "os" "path" + "regexp" "strconv" "strings" "sync" "time" "github.com/circonus-labs/circonus-gometrics/api" + "github.com/pkg/errors" + "github.com/tv42/httpunix" ) // Check management offers: @@ -35,7 +37,7 @@ import ( // - configuration parameters other than Check.SubmissionUrl, Debug and Log are ignored // - note: SubmissionUrl is **required** in this case as there is no way to derive w/o api // configure with api token - check management enabled -// - all otehr configuration parameters affect how the trap url is obtained +// - all other configuration parameters affect how the trap url is obtained // 1. provided (Check.SubmissionUrl) // 2. via check lookup (CheckConfig.Id) // 3. via a search using CheckConfig.InstanceId + CheckConfig.SearchTag @@ -85,6 +87,10 @@ type CheckConfig struct { // overrides the behavior and will re-activate the metric when it is // encountered. "(true|false)", default "false" ForceMetricActivation string + // Type of check to use (default: httptrap) + Type string + // Custom check config fields (default: none) + CustomConfigFields map[string]string } // BrokerConfig options for broker @@ -97,6 +103,8 @@ type BrokerConfig struct { // for a broker to be considered viable it must respond to a // connection attempt within this amount of time e.g. 
200ms, 2s, 1m MaxResponseTime string + // TLS configuration to use when communicating within broker + TLSConfig *tls.Config } // Config options @@ -151,6 +159,7 @@ type CheckManager struct { checkSearchTag api.TagType checkSecret CheckSecretType checkTags api.TagType + customConfigFields map[string]string checkSubmissionURL api.URLType checkDisplayName CheckDisplayNameType forceMetricActivation bool @@ -164,6 +173,7 @@ type CheckManager struct { brokerID api.IDType brokerSelectTag api.TagType brokerMaxResponseTime time.Duration + brokerTLS *tls.Config // state checkBundle *api.CheckBundle @@ -176,12 +186,15 @@ type CheckManager struct { trapMaxURLAge time.Duration trapmu sync.Mutex certPool *x509.CertPool + sockRx *regexp.Regexp } // Trap config type Trap struct { - URL *url.URL - TLS *tls.Config + URL *url.URL + TLS *tls.Config + IsSocket bool + SockTransport *httpunix.Transport } // NewCheckManager returns a new check manager @@ -208,6 +221,14 @@ func New(cfg *Config) (*CheckManager, error) { cm.Log = log.New(ioutil.Discard, "", log.LstdFlags) } + { + rx, err := regexp.Compile(`^http\+unix://(?P.+)/write/(?P.+)$`) + if err != nil { + return nil, errors.Wrap(err, "compiling socket regex") + } + cm.sockRx = rx + } + if cfg.Check.SubmissionURL != "" { cm.checkSubmissionURL = api.URLType(cfg.Check.SubmissionURL) } @@ -227,13 +248,17 @@ func New(cfg *Config) (*CheckManager, error) { cfg.API.Log = cm.Log apih, err := api.New(&cfg.API) if err != nil { - return nil, err + return nil, errors.Wrap(err, "initializing api client") } cm.apih = apih } // initialize check related data - cm.checkType = defaultCheckType + if cfg.Check.Type != "" { + cm.checkType = CheckTypeType(cfg.Check.Type) + } else { + cm.checkType = defaultCheckType + } idSetting := "0" if cfg.Check.ID != "" { @@ -241,7 +266,7 @@ func New(cfg *Config) (*CheckManager, error) { } id, err := strconv.Atoi(idSetting) if err != nil { - return nil, err + return nil, errors.Wrap(err, "converting check id") } 
cm.checkID = api.IDType(id) @@ -256,7 +281,7 @@ func New(cfg *Config) (*CheckManager, error) { } fm, err := strconv.ParseBool(fma) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing force metric activation") } cm.forceMetricActivation = fm @@ -285,13 +310,20 @@ func New(cfg *Config) (*CheckManager, error) { cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",") } + cm.customConfigFields = make(map[string]string) + if len(cfg.Check.CustomConfigFields) > 0 { + for fld, val := range cfg.Check.CustomConfigFields { + cm.customConfigFields[fld] = val + } + } + dur := cfg.Check.MaxURLAge if dur == "" { dur = defaultTrapMaxURLAge } maxDur, err := time.ParseDuration(dur) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing max url age") } cm.trapMaxURLAge = maxDur @@ -302,7 +334,7 @@ func New(cfg *Config) (*CheckManager, error) { } id, err = strconv.Atoi(idSetting) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing broker id") } cm.brokerID = api.IDType(id) @@ -316,10 +348,13 @@ func New(cfg *Config) (*CheckManager, error) { } maxDur, err = time.ParseDuration(dur) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing broker max response time") } cm.brokerMaxResponseTime = maxDur + // add user specified tls config for broker if provided + cm.brokerTLS = cfg.Broker.TLSConfig + // metrics cm.availableMetrics = make(map[string]bool) cm.metricTags = make(map[string][]string) @@ -368,24 +403,72 @@ func (cm *CheckManager) IsReady() bool { // GetSubmissionURL returns submission url for circonus func (cm *CheckManager) GetSubmissionURL() (*Trap, error) { if cm.trapURL == "" { - return nil, fmt.Errorf("[ERROR] no submission url currently available") - // if err := cm.initializeTrapURL(); err != nil { - // return nil, err - // } + return nil, errors.Errorf("get submission url - submission url unavailable") } trap := &Trap{} u, err := url.Parse(string(cm.trapURL)) if err != 
nil { - return nil, err + return nil, errors.Wrap(err, "get submission url") } - trap.URL = u + if u.Scheme == "http+unix" { + service := "circonus-agent" + sockPath := "" + metricID := "" + + subNames := cm.sockRx.SubexpNames() + matches := cm.sockRx.FindAllStringSubmatch(string(cm.trapURL), -1) + for _, match := range matches { + for idx, val := range match { + switch subNames[idx] { + case "sockfile": + sockPath = val + case "id": + metricID = val + } + } + } + + if sockPath == "" || metricID == "" { + return nil, errors.Errorf("get submission url - invalid socket url (%s)", cm.trapURL) + } + + u, err = url.Parse(fmt.Sprintf("http+unix://%s/write/%s", service, metricID)) + if err != nil { + return nil, errors.Wrap(err, "get submission url") + } + trap.URL = u + + trap.SockTransport = &httpunix.Transport{ + DialTimeout: 100 * time.Millisecond, + RequestTimeout: 1 * time.Second, + ResponseHeaderTimeout: 1 * time.Second, + } + trap.SockTransport.RegisterLocation(service, sockPath) + trap.IsSocket = true + } + if u.Scheme == "https" { + // preference user-supplied TLS configuration + if cm.brokerTLS != nil { + trap.TLS = cm.brokerTLS + return trap, nil + } + + // api.circonus.com uses a public CA signed certificate + // trap.noit.circonus.net uses Circonus CA private certificate + // enterprise brokers use private CA certificate + if trap.URL.Hostname() == "api.circonus.com" { + return trap, nil + } + if cm.certPool == nil { - cm.loadCACert() + if err := cm.loadCACert(); err != nil { + return nil, errors.Wrap(err, "get submission url") + } } t := &tls.Config{ RootCAs: cm.certPool, @@ -406,18 +489,19 @@ func (cm *CheckManager) ResetTrap() error { } cm.trapURL = "" - cm.certPool = nil - err := cm.initializeTrapURL() - return err + cm.certPool = nil // force re-fetching CA cert (if custom TLS config not supplied) + return cm.initializeTrapURL() } // RefreshTrap check when the last time the URL was reset, reset if needed -func (cm *CheckManager) RefreshTrap() { +func 
(cm *CheckManager) RefreshTrap() error { if cm.trapURL == "" { - return + return nil } if time.Since(cm.trapLastUpdate) >= cm.trapMaxURLAge { - cm.ResetTrap() + return cm.ResetTrap() } + + return nil } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go index eb8603a84b8d..61c4986b71f7 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/metrics.go @@ -13,8 +13,7 @@ func (cm *CheckManager) IsMetricActive(name string) bool { cm.availableMetricsmu.Lock() defer cm.availableMetricsmu.Unlock() - active, _ := cm.availableMetrics[name] - return active + return cm.availableMetrics[name] } // ActivateMetric determines if a given metric should be activated diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go index 32cae5bb41c9..019cc8f8660e 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go @@ -30,22 +30,35 @@ package circonusgometrics import ( - "errors" + "bufio" + "bytes" + "fmt" "io/ioutil" "log" "os" "strconv" + "strings" "sync" "time" "github.com/circonus-labs/circonus-gometrics/api" "github.com/circonus-labs/circonus-gometrics/checkmgr" + "github.com/pkg/errors" ) const ( defaultFlushInterval = "10s" // 10 * time.Second ) +// Metric defines an individual metric +type Metric struct { + Type string `json:"_type"` + Value interface{} `json:"_value"` +} + +// Metrics holds host metrics +type Metrics map[string]Metric + // Config options for circonus-gometrics type Config struct { Log *log.Logger @@ -63,6 +76,12 @@ type Config struct { Interval string } +type prevMetrics struct { + metrics *Metrics + metricsmu sync.Mutex + ts time.Time +} + // 
CirconusMetrics state type CirconusMetrics struct { Log *log.Logger @@ -75,7 +94,9 @@ type CirconusMetrics struct { flushInterval time.Duration flushing bool flushmu sync.Mutex + packagingmu sync.Mutex check *checkmgr.CheckManager + lastMetrics *prevMetrics counters map[string]uint64 cm sync.Mutex @@ -83,7 +104,7 @@ type CirconusMetrics struct { counterFuncs map[string]func() uint64 cfm sync.Mutex - gauges map[string]string + gauges map[string]interface{} gm sync.Mutex gaugeFuncs map[string]func() int64 @@ -114,11 +135,12 @@ func New(cfg *Config) (*CirconusMetrics, error) { cm := &CirconusMetrics{ counters: make(map[string]uint64), counterFuncs: make(map[string]func() uint64), - gauges: make(map[string]string), + gauges: make(map[string]interface{}), gaugeFuncs: make(map[string]func() int64), histograms: make(map[string]*Histogram), text: make(map[string]string), textFuncs: make(map[string]func() string), + lastMetrics: &prevMetrics{}, } // Logging @@ -143,7 +165,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { dur, err := time.ParseDuration(fi) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing flush interval") } cm.flushInterval = dur } @@ -154,7 +176,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetCounters != "" { setting, err := strconv.ParseBool(cfg.ResetCounters) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset counters") } cm.resetCounters = setting } @@ -163,7 +185,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetGauges != "" { setting, err := strconv.ParseBool(cfg.ResetGauges) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset gauges") } cm.resetGauges = setting } @@ -172,7 +194,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetHistograms != "" { setting, err := strconv.ParseBool(cfg.ResetHistograms) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset histograms") } cm.resetHistograms = 
setting } @@ -181,7 +203,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { if cfg.ResetText != "" { setting, err := strconv.ParseBool(cfg.ResetText) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parsing reset text") } cm.resetText = setting } @@ -193,7 +215,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { check, err := checkmgr.New(&cfg.CheckManager) if err != nil { - return nil, err + return nil, errors.Wrap(err, "creating new check manager") } cm.check = check } @@ -202,10 +224,10 @@ func New(cfg *Config) (*CirconusMetrics, error) { cm.check.Initialize() // if automatic flush is enabled, start it. - // note: submit will jettison metrics until initialization has completed. + // NOTE: submit will jettison metrics until initialization has completed. if cm.flushInterval > time.Duration(0) { go func() { - for _ = range time.NewTicker(cm.flushInterval).C { + for range time.NewTicker(cm.flushInterval).C { cm.Flush() } }() @@ -216,7 +238,7 @@ func New(cfg *Config) (*CirconusMetrics, error) { // Start deprecated NOP, automatic flush is started in New if flush interval > 0. 
func (m *CirconusMetrics) Start() { - return + // nop } // Ready returns true or false indicating if the check is ready to accept metrics @@ -224,24 +246,18 @@ func (m *CirconusMetrics) Ready() bool { return m.check.IsReady() } -// Flush metrics kicks off the process of sending metrics to Circonus -func (m *CirconusMetrics) Flush() { - if m.flushing { - return - } - m.flushmu.Lock() - m.flushing = true - m.flushmu.Unlock() +func (m *CirconusMetrics) packageMetrics() (map[string]*api.CheckBundleMetric, Metrics) { + + m.packagingmu.Lock() + defer m.packagingmu.Unlock() if m.Debug { - m.Log.Println("[DEBUG] Flushing metrics") + m.Log.Println("[DEBUG] Packaging metrics") } - // check for new metrics and enable them automatically - newMetrics := make(map[string]*api.CheckBundleMetric) - counters, gauges, histograms, text := m.snapshot() - output := make(map[string]interface{}) + newMetrics := make(map[string]*api.CheckBundleMetric) + output := make(Metrics, len(counters)+len(gauges)+len(histograms)+len(text)) for name, value := range counters { send := m.check.IsMetricActive(name) if !send && m.check.ActivateMetric(name) { @@ -253,10 +269,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } + output[name] = Metric{Type: "L", Value: value} } } @@ -271,10 +284,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value, - } + output[name] = Metric{Type: m.getGaugeType(value), Value: value} } } @@ -289,10 +299,7 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "n", - "_value": value.DecStrings(), - } + output[name] = Metric{Type: "n", Value: value.DecStrings()} } } @@ -307,13 +314,85 @@ func (m *CirconusMetrics) Flush() { } } if send { - output[name] = map[string]interface{}{ - "_type": "s", - "_value": value, + output[name] = Metric{Type: "s", Value: value} + } + } + 
+ m.lastMetrics.metricsmu.Lock() + defer m.lastMetrics.metricsmu.Unlock() + m.lastMetrics.metrics = &output + m.lastMetrics.ts = time.Now() + + return newMetrics, output +} + +// PromOutput returns lines of metrics in prom format +func (m *CirconusMetrics) PromOutput() (*bytes.Buffer, error) { + m.lastMetrics.metricsmu.Lock() + defer m.lastMetrics.metricsmu.Unlock() + + if m.lastMetrics.metrics == nil { + return nil, errors.New("no metrics available") + } + + var b bytes.Buffer + w := bufio.NewWriter(&b) + + ts := m.lastMetrics.ts.UnixNano() / int64(time.Millisecond) + + for name, metric := range *m.lastMetrics.metrics { + switch metric.Type { + case "n": + if strings.HasPrefix(fmt.Sprintf("%v", metric.Value), "[H[") { + continue // circonus histogram != prom "histogram" (aka percentile) } + case "s": + continue // text metrics unsupported } + fmt.Fprintf(w, "%s %v %d\n", name, metric.Value, ts) + } + + err := w.Flush() + if err != nil { + return nil, errors.Wrap(err, "flushing metric buffer") + } + + return &b, err +} + +// FlushMetrics flushes current metrics to a structure and returns it (does NOT send to Circonus) +func (m *CirconusMetrics) FlushMetrics() *Metrics { + m.flushmu.Lock() + if m.flushing { + m.flushmu.Unlock() + return &Metrics{} + } + + m.flushing = true + m.flushmu.Unlock() + + _, output := m.packageMetrics() + + m.flushmu.Lock() + m.flushing = false + m.flushmu.Unlock() + + return &output +} + +// Flush metrics kicks off the process of sending metrics to Circonus +func (m *CirconusMetrics) Flush() { + m.flushmu.Lock() + if m.flushing { + m.flushmu.Unlock() + return } + m.flushing = true + m.flushmu.Unlock() + + newMetrics, output := m.packageMetrics() + if len(output) > 0 { m.submit(output, newMetrics) } else { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go index 2b34961f12b2..2311b0a4143b 100644 --- 
a/vendor/github.com/circonus-labs/circonus-gometrics/counter.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/counter.go @@ -4,6 +4,8 @@ package circonusgometrics +import "fmt" + // A Counter is a monotonically increasing unsigned integer. // // Use a counter to derive rates (e.g., record total number of requests, derive @@ -40,6 +42,19 @@ func (m *CirconusMetrics) RemoveCounter(metric string) { delete(m.counters, metric) } +// GetCounterTest returns the current value for a counter. (note: it is a function specifically for "testing", disable automatic submission during testing.) +func (m *CirconusMetrics) GetCounterTest(metric string) (uint64, error) { + m.cm.Lock() + defer m.cm.Unlock() + + if val, ok := m.counters[metric]; ok { + return val, nil + } + + return 0, fmt.Errorf("Counter metric '%s' not found", metric) + +} + // SetCounterFunc set counter to a function [called at flush interval] func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) { m.cfm.Lock() diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go index b44236959668..4e05484ece64 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/gauge.go @@ -22,7 +22,48 @@ func (m *CirconusMetrics) Gauge(metric string, val interface{}) { func (m *CirconusMetrics) SetGauge(metric string, val interface{}) { m.gm.Lock() defer m.gm.Unlock() - m.gauges[metric] = m.gaugeValString(val) + m.gauges[metric] = val +} + +// AddGauge adds value to existing gauge +func (m *CirconusMetrics) AddGauge(metric string, val interface{}) { + m.gm.Lock() + defer m.gm.Unlock() + + v, ok := m.gauges[metric] + if !ok { + m.gauges[metric] = val + return + } + + switch val.(type) { + default: + // ignore it, unsupported type + case int: + m.gauges[metric] = v.(int) + val.(int) + case int8: + m.gauges[metric] = v.(int8) + val.(int8) + case int16: + 
m.gauges[metric] = v.(int16) + val.(int16) + case int32: + m.gauges[metric] = v.(int32) + val.(int32) + case int64: + m.gauges[metric] = v.(int64) + val.(int64) + case uint: + m.gauges[metric] = v.(uint) + val.(uint) + case uint8: + m.gauges[metric] = v.(uint8) + val.(uint8) + case uint16: + m.gauges[metric] = v.(uint16) + val.(uint16) + case uint32: + m.gauges[metric] = v.(uint32) + val.(uint32) + case uint64: + m.gauges[metric] = v.(uint64) + val.(uint64) + case float32: + m.gauges[metric] = v.(float32) + val.(float32) + case float64: + m.gauges[metric] = v.(float64) + val.(float64) + } } // RemoveGauge removes a gauge @@ -32,6 +73,18 @@ func (m *CirconusMetrics) RemoveGauge(metric string) { delete(m.gauges, metric) } +// GetGaugeTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.) +func (m *CirconusMetrics) GetGaugeTest(metric string) (interface{}, error) { + m.gm.Lock() + defer m.gm.Unlock() + + if val, ok := m.gauges[metric]; ok { + return val, nil + } + + return nil, fmt.Errorf("Gauge metric '%s' not found", metric) +} + // SetGaugeFunc sets a gauge to a function [called at flush interval] func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) { m.gfm.Lock() @@ -46,36 +99,31 @@ func (m *CirconusMetrics) RemoveGaugeFunc(metric string) { delete(m.gaugeFuncs, metric) } -// gaugeValString converts an interface value (of a supported type) to a string -func (m *CirconusMetrics) gaugeValString(val interface{}) string { - vs := "" - switch v := val.(type) { - default: - // ignore it, unsupported type +// getGaugeType returns accurate resmon type for underlying type of gauge value +func (m *CirconusMetrics) getGaugeType(v interface{}) string { + mt := "n" + switch v.(type) { case int: - vs = fmt.Sprintf("%d", v) + mt = "i" case int8: - vs = fmt.Sprintf("%d", v) + mt = "i" case int16: - vs = fmt.Sprintf("%d", v) + mt = "i" case int32: - vs = fmt.Sprintf("%d", v) - 
case int64: - vs = fmt.Sprintf("%d", v) + mt = "i" case uint: - vs = fmt.Sprintf("%d", v) + mt = "I" case uint8: - vs = fmt.Sprintf("%d", v) + mt = "I" case uint16: - vs = fmt.Sprintf("%d", v) + mt = "I" case uint32: - vs = fmt.Sprintf("%d", v) + mt = "I" + case int64: + mt = "l" case uint64: - vs = fmt.Sprintf("%d", v) - case float32: - vs = fmt.Sprintf("%f", v) - case float64: - vs = fmt.Sprintf("%f", v) + mt = "L" } - return vs + + return mt } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go index 0ba1a3b2344d..d39f008de3ca 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/histogram.go @@ -5,6 +5,7 @@ package circonusgometrics import ( + "fmt" "sync" "github.com/circonus-labs/circonusllhist" @@ -27,6 +28,17 @@ func (m *CirconusMetrics) RecordValue(metric string, val float64) { m.SetHistogramValue(metric, val) } +// RecordCountForValue adds count n for value to a histogram +func (m *CirconusMetrics) RecordCountForValue(metric string, val float64, n int64) { + hist := m.NewHistogram(metric) + + m.hm.Lock() + hist.rw.Lock() + hist.hist.RecordValues(val, n) + hist.rw.Unlock() + m.hm.Unlock() +} + // SetHistogramValue adds a value to a histogram func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) { hist := m.NewHistogram(metric) @@ -38,6 +50,18 @@ func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) { m.hm.Unlock() } +// GetHistogramTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.) 
+func (m *CirconusMetrics) GetHistogramTest(metric string) ([]string, error) { + m.hm.Lock() + defer m.hm.Unlock() + + if hist, ok := m.histograms[metric]; ok { + return hist.hist.DecStrings(), nil + } + + return []string{""}, fmt.Errorf("Histogram metric '%s' not found", metric) +} + // RemoveHistogram removes a histogram func (m *CirconusMetrics) RemoveHistogram(metric string) { m.hm.Lock() diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go index 3b0c0e0df57d..151f6c0a44b8 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/submit.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/submit.go @@ -6,8 +6,8 @@ package circonusgometrics import ( "bytes" + "context" "encoding/json" - "errors" "fmt" "io/ioutil" "log" @@ -18,9 +18,10 @@ import ( "github.com/circonus-labs/circonus-gometrics/api" "github.com/hashicorp/go-retryablehttp" + "github.com/pkg/errors" ) -func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) { +func (m *CirconusMetrics) submit(output Metrics, newMetrics map[string]*api.CheckBundleMetric) { // if there is nowhere to send metrics to, just return. 
if !m.check.IsReady() { @@ -43,6 +44,12 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s return } + // OK response from circonus-agent does not + // indicate how many metrics were received + if numStats == -1 { + numStats = len(output) + } + if m.Debug { m.Log.Printf("[DEBUG] %d stats sent\n", numStats) } @@ -51,7 +58,7 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { trap, err := m.check.GetSubmissionURL() if err != nil { - return 0, err + return 0, errors.Wrap(err, "trap call") } dataReader := bytes.NewReader(payload) @@ -65,10 +72,14 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { // keep last HTTP error in the event of retry failure var lastHTTPError error - retryPolicy := func(resp *http.Response, err error) (bool, error) { + retryPolicy := func(ctx context.Context, resp *http.Response, err error) (bool, error) { + if ctxErr := ctx.Err(); ctxErr != nil { + return false, ctxErr + } + if err != nil { lastHTTPError = err - return true, err + return true, errors.Wrap(err, "retry policy") } // Check the response code. 
We retry on 500-range responses to allow // the server time to recover, as 500's are typically not permanent @@ -98,20 +109,24 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { TLSClientConfig: trap.TLS, DisableKeepAlives: true, MaxIdleConnsPerHost: -1, - DisableCompression: true, + DisableCompression: false, } - } else { + } else if trap.URL.Scheme == "http" { client.HTTPClient.Transport = &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ Timeout: 30 * time.Second, KeepAlive: 30 * time.Second, }).Dial, - TLSHandshakeTimeout: 10 * time.Second, DisableKeepAlives: true, MaxIdleConnsPerHost: -1, - DisableCompression: true, + DisableCompression: false, } + } else if trap.IsSocket { + m.Log.Println("using socket transport") + client.HTTPClient.Transport = trap.SockTransport + } else { + return 0, errors.Errorf("unknown scheme (%s), skipping submission", trap.URL.Scheme) } client.RetryWaitMin = 1 * time.Second client.RetryWaitMax = 5 * time.Second @@ -138,10 +153,17 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { if attempts == client.RetryMax { m.check.RefreshTrap() } - return 0, err + return 0, errors.Wrap(err, "trap call") } defer resp.Body.Close() + + // no content - expected result from + // circonus-agent when metrics accepted + if resp.StatusCode == http.StatusNoContent { + return -1, nil + } + body, err := ioutil.ReadAll(resp.Body) if err != nil { m.Log.Printf("[ERROR] reading body, proceeding. %s\n", err) @@ -152,7 +174,7 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) { m.Log.Printf("[ERROR] parsing body, proceeding. 
%v (%s)\n", err, body) } - if resp.StatusCode != 200 { + if resp.StatusCode != http.StatusOK { return 0, errors.New("[ERROR] bad response code: " + strconv.Itoa(resp.StatusCode)) } switch v := response["stats"].(type) { diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go index 73259a7b152c..87c80516bafe 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/tools.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/tools.go @@ -17,7 +17,6 @@ func (m *CirconusMetrics) TrackHTTPLatency(name string, handler func(http.Respon start := time.Now().UnixNano() handler(rw, req) elapsed := time.Now().UnixNano() - start - //hist := m.NewHistogram("go`HTTP`" + req.Method + "`" + name + "`latency") m.RecordValue("go`HTTP`"+req.Method+"`"+name+"`latency", float64(elapsed)/float64(time.Second)) } } diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/util.go b/vendor/github.com/circonus-labs/circonus-gometrics/util.go index 4428e8985141..3def2caa3f59 100644 --- a/vendor/github.com/circonus-labs/circonus-gometrics/util.go +++ b/vendor/github.com/circonus-labs/circonus-gometrics/util.go @@ -33,7 +33,7 @@ func (m *CirconusMetrics) Reset() { m.counters = make(map[string]uint64) m.counterFuncs = make(map[string]func() uint64) - m.gauges = make(map[string]string) + m.gauges = make(map[string]interface{}) m.gaugeFuncs = make(map[string]func() int64) m.histograms = make(map[string]*Histogram) m.text = make(map[string]string) @@ -41,7 +41,7 @@ func (m *CirconusMetrics) Reset() { } // snapshot returns a copy of the values of all registered counters and gauges. 
-func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string, h map[string]*circonusllhist.Histogram, t map[string]string) { +func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]interface{}, h map[string]*circonusllhist.Histogram, t map[string]string) { c = m.snapCounters() g = m.snapGauges() h = m.snapHistograms() @@ -68,33 +68,27 @@ func (m *CirconusMetrics) snapCounters() map[string]uint64 { for n, f := range m.counterFuncs { c[n] = f() } - if m.resetCounters && len(c) > 0 { - m.counterFuncs = make(map[string]func() uint64) - } return c } -func (m *CirconusMetrics) snapGauges() map[string]string { +func (m *CirconusMetrics) snapGauges() map[string]interface{} { m.gm.Lock() defer m.gm.Unlock() m.gfm.Lock() defer m.gfm.Unlock() - g := make(map[string]string, len(m.gauges)+len(m.gaugeFuncs)) + g := make(map[string]interface{}, len(m.gauges)+len(m.gaugeFuncs)) for n, v := range m.gauges { g[n] = v } if m.resetGauges && len(g) > 0 { - m.gauges = make(map[string]string) + m.gauges = make(map[string]interface{}) } for n, f := range m.gaugeFuncs { - g[n] = m.gaugeValString(f()) - } - if m.resetGauges && len(g) > 0 { - m.gaugeFuncs = make(map[string]func() int64) + g[n] = f() } return g @@ -136,9 +130,6 @@ func (m *CirconusMetrics) snapText() map[string]string { for n, f := range m.textFuncs { t[n] = f() } - if m.resetText && len(t) > 0 { - m.textFuncs = make(map[string]func() string) - } return t } diff --git a/vendor/vendor.json b/vendor/vendor.json index a99e4e6ec667..28cf923a3821 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -53,10 +53,10 @@ {"path":"github.com/bgentry/speakeasy/example","revision":"36e9cfdd690967f4f690c6edcc9ffacd006014a0"}, {"path":"github.com/boltdb/bolt","checksumSHA1":"R1Q34Pfnt197F/nCOO9kG8c+Z90=","comment":"v1.2.0","revision":"2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8","revisionTime":"2017-07-17T17:11:48Z","version":"v1.3.1","versionExact":"v1.3.1"}, 
{"path":"github.com/burntsushi/toml","checksumSHA1":"InIrfOI7Ys1QqZpCgTB4yW1G32M=","revision":"99064174e013895bbd9b025c31100bd1d9b590ca","revisionTime":"2016-07-17T15:07:09Z"}, - {"path":"github.com/circonus-labs/circonus-gometrics","checksumSHA1":"vhCArnFcQRM84iZcfMXka+2OzrE=","revision":"a2c28f079ec3d4fdc17ed577cca75bee88a2da25","revisionTime":"2017-01-31T13:03:52Z"}, - {"path":"github.com/circonus-labs/circonus-gometrics/api","checksumSHA1":"sInms3AjZrjG/WCRcmS/NSzLUT4=","revision":"a2c28f079ec3d4fdc17ed577cca75bee88a2da25","revisionTime":"2017-01-31T13:03:52Z"}, - {"path":"github.com/circonus-labs/circonus-gometrics/api/config","checksumSHA1":"bQhz/fcyZPmuHSH2qwC4ZtATy5c=","revision":"a2c28f079ec3d4fdc17ed577cca75bee88a2da25","revisionTime":"2017-01-31T13:03:52Z"}, - {"path":"github.com/circonus-labs/circonus-gometrics/checkmgr","checksumSHA1":"6hvd+YFb1eWFkc3pVcnOPPa2OVw=","revision":"a2c28f079ec3d4fdc17ed577cca75bee88a2da25","revisionTime":"2017-01-31T13:03:52Z"}, + {"path":"github.com/circonus-labs/circonus-gometrics","checksumSHA1":"/qvtQq5y0RZCsRyOOsan87V2AL0=","revision":"dd698dc110872f6e554abf74a7740fc363354086","revisionTime":"2018-08-20T20:09:38Z"}, + {"path":"github.com/circonus-labs/circonus-gometrics/api","checksumSHA1":"Lll5SHEsVto8Eqbrj7NVj7BfgDI=","revision":"dd698dc110872f6e554abf74a7740fc363354086","revisionTime":"2018-08-20T20:09:38Z"}, + {"path":"github.com/circonus-labs/circonus-gometrics/api/config","checksumSHA1":"bQhz/fcyZPmuHSH2qwC4ZtATy5c=","revision":"dd698dc110872f6e554abf74a7740fc363354086","revisionTime":"2018-08-20T20:09:38Z"}, + {"path":"github.com/circonus-labs/circonus-gometrics/checkmgr","checksumSHA1":"Ij8yB33E0Kk+GfTkNRoF1mG26dc=","revision":"dd698dc110872f6e554abf74a7740fc363354086","revisionTime":"2018-08-20T20:09:38Z"}, {"path":"github.com/circonus-labs/circonusllhist","checksumSHA1":"VbfeVqeOM+dTNxCmpvmYS0LwQn0=","revision":"7d649b46cdc2cd2ed102d350688a75a4fd7778c6","revisionTime":"2016-11-21T13:51:53Z"}, 
{"path":"github.com/containernetworking/cni/pkg/types","checksumSHA1":"NeAp/3+Hedu9tnMai+LihERPj84=","revision":"5c3c17164270150467498a32c71436c7cd5501be","revisionTime":"2016-06-02T16:00:07Z"}, {"path":"github.com/coreos/go-semver/semver","checksumSHA1":"97BsbXOiZ8+Kr+LIuZkQFtSj7H4=","revision":"1817cd4bea52af76542157eeabd74b057d1a199e","revisionTime":"2017-06-13T09:22:38Z"}, @@ -307,6 +307,7 @@ {"path":"github.com/stretchr/testify/require","checksumSHA1":"KqYmXUcuGwsvBL6XVsQnXsFb3LI=","revision":"c679ae2cc0cb27ec3293fea7e254e47386f05d69","revisionTime":"2018-03-14T08:05:35Z"}, {"path":"github.com/syndtr/gocapability/capability","checksumSHA1":"PgEklGW56c5RLHqQhORxt6jS3fY=","revision":"db04d3cc01c8b54962a58ec7e491717d06cfcc16","revisionTime":"2017-07-04T07:02:18Z"}, {"path":"github.com/tonnerre/golang-text","checksumSHA1":"t24KnvC9jRxiANVhpw2pqFpmEu8=","revision":"048ed3d792f7104850acbc8cfc01e5a6070f4c04","revisionTime":"2013-09-25T19:58:46Z"}, + {"path":"github.com/tv42/httpunix","checksumSHA1":"2xcr/mhxBFlDjpxe/Mc2Wb4RGR8=","revision":"b75d8614f926c077e48d85f1f8f7885b758c6225","revisionTime":"2015-04-27T01:28:21Z"}, {"path":"github.com/ugorji/go/codec","checksumSHA1":"8G1zvpE4gTtWQRuP/x2HPVDmflo=","revision":"0053ebfd9d0ee06ccefbfe17072021e1d4acebee","revisionTime":"2017-06-20T06:01:02Z"}, {"path":"github.com/ugorji/go/codec/codecgen","checksumSHA1":"OgParimNuU2CJqr3pcTympeQZUc=","revision":"5efa3251c7f7d05e5d9704a69a984ec9f1386a40","revisionTime":"2017-06-20T10:48:52Z"}, {"path":"github.com/ulikunitz/xz","checksumSHA1":"z2kAtVle4NFV2OExI85fZoTcsI4=","revision":"0c6b41e72360850ca4f98dc341fd999726ea007f","revisionTime":"2017-06-05T21:53:11Z"}, From 025ca166f34ee0fcaa7782c32249ff147ee54529 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Mon, 10 Sep 2018 15:08:34 -0700 Subject: [PATCH 4/6] Missing vendors --- .../circonus-gometrics/CHANGELOG.md | 38 ++++++++ .../circonus-gometrics/Gopkg.lock | 39 ++++++++ .../circonus-gometrics/Gopkg.toml | 37 ++++++++ 
.../hashicorp/vault/helper/hclutil/hcl.go | 36 +++++++ vendor/github.com/tv42/httpunix/LICENSE | 19 ++++ vendor/github.com/tv42/httpunix/httpunix.go | 95 +++++++++++++++++++ vendor/vendor.json | 1 + 7 files changed, 265 insertions(+) create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock create mode 100644 vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml create mode 100644 vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go create mode 100644 vendor/github.com/tv42/httpunix/LICENSE create mode 100644 vendor/github.com/tv42/httpunix/httpunix.go diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md b/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md new file mode 100644 index 000000000000..581157989fdb --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/CHANGELOG.md @@ -0,0 +1,38 @@ +# v2.2.1 + +* fix: if submission url host is 'api.circonus.com' do not use private CA in TLSConfig + +# v2.2.0 + +* fix: do not reset counter|gauge|text funcs after each snapshot (only on explicit call to Reset) +* upd: dashboards - optional widget attributes - which are structs - should be pointers for correct omission in json sent to api +* fix: dashboards - remove `omitempty` from required attributes +* fix: graphs - remove `omitempty` from required attributes +* fix: worksheets - correct attribute name, remove `omitempty` from required attributes +* fix: handle case where a broker has no external host or ip set + +# v2.1.2 + +* upd: breaking change in upstream repo +* upd: upstream deps + +# v2.1.1 + +* dep dependencies +* fix two instances of shadowed variables +* fix several documentation typos +* simplify (gofmt -s) +* remove an inefficient use of regexp.MatchString + +# v2.1.0 + +* Add unix socket capability for SubmissionURL `http+unix://...` +* Add `RecordCountForValue` function to histograms + +# 
v2.0.0 + +* gauges as `interface{}` + * change: `GeTestGauge(string) (string,error)` -> `GeTestGauge(string) (interface{},error)` + * add: `AddGauge(string, interface{})` to add a delta value to an existing gauge +* prom output candidate +* Add `CHANGELOG.md` to repository diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock new file mode 100644 index 000000000000..0f993c9a5e73 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.lock @@ -0,0 +1,39 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + branch = "master" + name = "github.com/circonus-labs/circonusllhist" + packages = ["."] + revision = "5eb751da55c6d3091faf3861ec5062ae91fee9d0" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-cleanhttp" + packages = ["."] + revision = "d5fe4b57a186c716b0e00b8c301cbd9b4182694d" + +[[projects]] + branch = "master" + name = "github.com/hashicorp/go-retryablehttp" + packages = ["."] + revision = "e651d75abec6fbd4f2c09508f72ae7af8a8b7171" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/tv42/httpunix" + packages = ["."] + revision = "b75d8614f926c077e48d85f1f8f7885b758c6225" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "6db34ba31cd011426f28b5db0dbe259c4dc3787fb2074b2c06cb382385a90242" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml new file mode 100644 index 000000000000..fa41a53c0a65 --- /dev/null +++ b/vendor/github.com/circonus-labs/circonus-gometrics/Gopkg.toml @@ -0,0 +1,37 @@ +# Gopkg.toml example +# +# Refer to 
https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. +# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + branch = "master" + name = "github.com/circonus-labs/circonusllhist" + +[[constraint]] + branch = "master" + name = "github.com/hashicorp/go-retryablehttp" + +[[constraint]] + name = "github.com/pkg/errors" + version = "0.8.0" + +[[constraint]] + branch = "master" + name = "github.com/tv42/httpunix" diff --git a/vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go b/vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go new file mode 100644 index 000000000000..0b120367d5a6 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/hclutil/hcl.go @@ -0,0 +1,36 @@ +package hclutil + +import ( + "fmt" + + multierror "github.com/hashicorp/go-multierror" + "github.com/hashicorp/hcl/hcl/ast" +) + +// CheckHCLKeys checks whether the keys in the AST list contains any of the valid keys provided. 
+func CheckHCLKeys(node ast.Node, valid []string) error { + var list *ast.ObjectList + switch n := node.(type) { + case *ast.ObjectList: + list = n + case *ast.ObjectType: + list = n.List + default: + return fmt.Errorf("cannot check HCL keys of type %T", n) + } + + validMap := make(map[string]struct{}, len(valid)) + for _, v := range valid { + validMap[v] = struct{}{} + } + + var result error + for _, item := range list.Items { + key := item.Keys[0].Token.Value().(string) + if _, ok := validMap[key]; !ok { + result = multierror.Append(result, fmt.Errorf("invalid key %q on line %d", key, item.Assign.Line)) + } + } + + return result +} diff --git a/vendor/github.com/tv42/httpunix/LICENSE b/vendor/github.com/tv42/httpunix/LICENSE new file mode 100644 index 000000000000..33aec14578c4 --- /dev/null +++ b/vendor/github.com/tv42/httpunix/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2013-2015 Tommi Virtanen. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
diff --git a/vendor/github.com/tv42/httpunix/httpunix.go b/vendor/github.com/tv42/httpunix/httpunix.go new file mode 100644 index 000000000000..95f5e95a8177 --- /dev/null +++ b/vendor/github.com/tv42/httpunix/httpunix.go @@ -0,0 +1,95 @@ +// Package httpunix provides a HTTP transport (net/http.RoundTripper) +// that uses Unix domain sockets instead of HTTP. +// +// This is useful for non-browser connections within the same host, as +// it allows using the file system for credentials of both client +// and server, and guaranteeing unique names. +// +// The URLs look like this: +// +// http+unix://LOCATION/PATH_ETC +// +// where LOCATION is translated to a file system path with +// Transport.RegisterLocation, and PATH_ETC follow normal http: scheme +// conventions. +package httpunix + +import ( + "bufio" + "errors" + "net" + "net/http" + "sync" + "time" +) + +// Scheme is the URL scheme used for HTTP over UNIX domain sockets. +const Scheme = "http+unix" + +// Transport is a http.RoundTripper that connects to Unix domain +// sockets. +type Transport struct { + DialTimeout time.Duration + RequestTimeout time.Duration + ResponseHeaderTimeout time.Duration + + mu sync.Mutex + // map a URL "hostname" to a UNIX domain socket path + loc map[string]string +} + +// RegisterLocation registers an URL location and maps it to the given +// file system path. +// +// Calling RegisterLocation twice for the same location is a +// programmer error, and causes a panic. +func (t *Transport) RegisterLocation(loc string, path string) { + t.mu.Lock() + defer t.mu.Unlock() + if t.loc == nil { + t.loc = make(map[string]string) + } + if _, exists := t.loc[loc]; exists { + panic("location " + loc + " already registered") + } + t.loc[loc] = path +} + +var _ http.RoundTripper = (*Transport)(nil) + +// RoundTrip executes a single HTTP transaction. See +// net/http.RoundTripper. 
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + if req.URL == nil { + return nil, errors.New("http+unix: nil Request.URL") + } + if req.URL.Scheme != Scheme { + return nil, errors.New("unsupported protocol scheme: " + req.URL.Scheme) + } + if req.URL.Host == "" { + return nil, errors.New("http+unix: no Host in request URL") + } + t.mu.Lock() + path, ok := t.loc[req.URL.Host] + t.mu.Unlock() + if !ok { + return nil, errors.New("unknown location: " + req.Host) + } + + c, err := net.DialTimeout("unix", path, t.DialTimeout) + if err != nil { + return nil, err + } + r := bufio.NewReader(c) + if t.RequestTimeout > 0 { + c.SetWriteDeadline(time.Now().Add(t.RequestTimeout)) + } + if err := req.Write(c); err != nil { + return nil, err + } + if t.ResponseHeaderTimeout > 0 { + c.SetReadDeadline(time.Now().Add(t.ResponseHeaderTimeout)) + } + resp, err := http.ReadResponse(r, req) + return resp, err +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 28cf923a3821..c09a8f098709 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -198,6 +198,7 @@ {"path":"github.com/hashicorp/vault","checksumSHA1":"eGzvBRMFD6ZB3A6uO750np7Om/E=","revision":"182ba68a9589d4cef95234134aaa498a686e3de3","revisionTime":"2016-08-21T23:40:57Z"}, {"path":"github.com/hashicorp/vault/api","checksumSHA1":"+B4wuJNerIUKNAVzld7CmMaNW5A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/compressutil","checksumSHA1":"bSdPFOHaTwEvM4PIvn0PZfn75jM=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, + {"path":"github.com/hashicorp/vault/helper/hclutil","checksumSHA1":"RlqPBLOexQ0jj6jomhiompWKaUg=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, 
{"path":"github.com/hashicorp/vault/helper/jsonutil","checksumSHA1":"POgkM3GrjRFw6H3sw95YNEs552A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/parseutil","checksumSHA1":"HA2MV/2XI0HcoThSRxQCaBZR2ps=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/strutil","checksumSHA1":"HdVuYhZ5TuxeIFqi0jy2GHW7a4o=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, From 26ba1e159134ad6b1202eaa1d757a56adf803460 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Tue, 11 Sep 2018 10:19:03 -0700 Subject: [PATCH 5/6] remove toplevel vault dependency --- .../github.com/hashicorp/vault/CHANGELOG.md | 1170 ----------------- .../hashicorp/vault/CONTRIBUTING.md | 72 - vendor/github.com/hashicorp/vault/LICENSE | 363 ----- vendor/github.com/hashicorp/vault/Makefile | 63 - vendor/github.com/hashicorp/vault/README.md | 130 -- vendor/github.com/hashicorp/vault/make.bat | 107 -- vendor/vendor.json | 1 - 7 files changed, 1906 deletions(-) delete mode 100644 vendor/github.com/hashicorp/vault/CHANGELOG.md delete mode 100644 vendor/github.com/hashicorp/vault/CONTRIBUTING.md delete mode 100644 vendor/github.com/hashicorp/vault/LICENSE delete mode 100644 vendor/github.com/hashicorp/vault/Makefile delete mode 100644 vendor/github.com/hashicorp/vault/README.md delete mode 100644 vendor/github.com/hashicorp/vault/make.bat diff --git a/vendor/github.com/hashicorp/vault/CHANGELOG.md b/vendor/github.com/hashicorp/vault/CHANGELOG.md deleted file mode 100644 index d5be7357b235..000000000000 --- a/vendor/github.com/hashicorp/vault/CHANGELOG.md +++ /dev/null @@ -1,1170 +0,0 @@ -## 0.6.1 (August 22, 2016) - -DEPRECATIONS/BREAKING CHANGES: - - * Once the active node is 0.6.1, standby nodes must also be 0.6.1 in order to - connect to the HA cluster. 
We recommend following our [general upgrade - instructions](https://www.vaultproject.io/docs/install/upgrade.html) in - addition to 0.6.1-specific upgrade instructions to ensure that this is not - an issue. - * Status codes for sealed/uninitialized Vaults have changed to `503`/`501` - respectively. See the [version-specific upgrade - guide](https://www.vaultproject.io/docs/install/upgrade-to-0.6.1.html) for - more details. - * Root tokens (tokens with the `root` policy) can no longer be created except - by another root token or the `generate-root` endpoint. - * Issued certificates from the `pki` backend against new roles created or - modified after upgrading will contain a set of default key usages. - * The `dynamodb` physical data store no longer supports HA by default. It has - some non-ideal behavior around failover that was causing confusion. See the - [documentation](https://www.vaultproject.io/docs/config/index.html#ha_enabled) - for information on enabling HA mode. It is very important that this - configuration is added _before upgrading_. - * The `ldap` backend no longer searches for `memberOf` groups as part of its - normal flow. Instead, the desired group filter must be specified. This fixes - some errors and increases speed for directories with different structures, - but if this behavior has been relied upon, ensure that you see the upgrade - notes _before upgrading_. - * `app-id` is now deprecated with the addition of the new AppRole backend. - There are no plans to remove it, but we encourage using AppRole whenever - possible, as it offers enhanced functionality and can accommodate many more - types of authentication paradigms. 
- -FEATURES: - - * **AppRole Authentication Backend**: The `approle` backend is a - machine-oriented authentication backend that provides a similar concept to - App-ID while adding many missing features, including a pull model that - allows for the backend to generate authentication credentials rather than - requiring operators or other systems to push credentials in. It should be - useful in many more situations than App-ID. The inclusion of this backend - deprecates App-ID. [GH-1426] - * **Request Forwarding**: Vault servers can now forward requests to each other - rather than redirecting clients. This feature is off by default in 0.6.1 but - will be on by default in the next release. See the [HA concepts - page](https://www.vaultproject.io/docs/concepts/ha.html) for information on - enabling and configuring it. [GH-443] - * **Convergent Encryption in `Transit`**: The `transit` backend now supports a - convergent encryption mode where the same plaintext will produce the same - ciphertext. Although very useful in some situations, this has potential - security implications, which are mostly mitigated by requiring the use of - key derivation when convergent encryption is enabled. See [the `transit` - backend - documentation](https://www.vaultproject.io/docs/secrets/transit/index.html) - for more details. [GH-1537] - * **Improved LDAP Group Filters**: The `ldap` auth backend now uses templates - to define group filters, providing the capability to support some - directories that could not easily be supported before (especially specific - Active Directory setups with nested groups). [GH-1388] - * **Key Usage Control in `PKI`**: Issued certificates from roles created or - modified after upgrading contain a set of default key usages for increased - compatibility with OpenVPN and some other software. This set can be changed - when writing a role definition. Existing roles are unaffected. 
[GH-1552] - * **Request Retrying in the CLI and Go API**: Requests that fail with a `5xx` - error code will now retry after a backoff. The maximum total number of - retries (including disabling this functionality) can be set with an - environment variable. See the [environment variable - documentation](https://www.vaultproject.io/docs/commands/environment.html) - for more details. [GH-1594] - * **Service Discovery in `vault init`**: The new `-auto` option on `vault init` - will perform service discovery using Consul. When only one node is discovered, - it will be initialized and when more than one node is discovered, they will - be output for easy selection. See `vault init --help` for more details. [GH-1642] - * **MongoDB Secret Backend**: Generate dynamic unique MongoDB database - credentials based on configured roles. Sponsored by - [CommerceHub](http://www.commercehub.com/). [GH-1414] - * **Circonus Metrics Integration**: Vault can now send metrics to - [Circonus](http://www.circonus.com/). See the [configuration - documentation](https://www.vaultproject.io/docs/config/index.html) for - details. [GH-1646] - -IMPROVEMENTS: - - * audit: Added a unique identifier to each request which will also be found in - the request portion of the response. [GH-1650] - * auth/aws-ec2: Added a new constraint `bound_account_id` to the role - [GH-1523] - * auth/aws-ec2: Added a new constraint `bound_iam_role_arn` to the role - [GH-1522] - * auth/aws-ec2: Added `ttl` field for the role [GH-1703] - * auth/ldap, secret/cassandra, physical/consul: Clients with `tls.Config` - have the minimum TLS version set to 1.2 by default. This is configurable. 
- * auth/token: Added endpoint to list accessors [GH-1676] - * auth/token: Added `disallowed_policies` option to token store roles [GH-1681] - * auth/token: `root` or `sudo` tokens can now create periodic tokens via - `auth/token/create`; additionally, the same token can now be periodic and - have an explicit max TTL [GH-1725] - * build: Add support for building on Solaris/Illumos [GH-1726] - * cli: Output formatting in the presence of warnings in the response object - [GH-1533] - * cli: `vault auth` command supports a `-path` option to take in the path at - which the auth backend is enabled, thereby allowing authenticating against - different paths using the command options [GH-1532] - * cli: `vault auth -methods` will now display the config settings of the mount - [GH-1531] - * cli: `vault read/write/unwrap -field` now allows selecting token response - fields [GH-1567] - * cli: `vault write -field` now allows selecting wrapped response fields - [GH-1567] - * command/status: Version information and cluster details added to the output - of `vault status` command [GH-1671] - * core: Response wrapping is now enabled for login endpoints [GH-1588] - * core: The duration of leadership is now exported via events through - telemetry [GH-1625] - * core: `sys/capabilities-self` is now accessible as part of the `default` - policy [GH-1695] - * core: `sys/renew` is now accessible as part of the `default` policy [GH-1701] - * core: Unseal keys will now be returned in both hex and base64 forms, and - either can be used [GH-1734] - * core: Responses from most `/sys` endpoints now return normal `api.Secret` - structs in addition to the values they carried before. 
This means that - response wrapping can now be used with most authenticated `/sys` operations - [GH-1699] - * physical/etcd: Support `ETCD_ADDR` env var for specifying addresses [GH-1576] - * physical/consul: Allowing additional tags to be added to Consul service - registration via `service_tags` option [GH-1643] - * secret/aws: Listing of roles is supported now [GH-1546] - * secret/cassandra: Add `connect_timeout` value for Cassandra connection - configuration [GH-1581] - * secret/mssql,mysql,postgresql: Reading of connection settings is supported - in all the sql backends [GH-1515] - * secret/mysql: Added optional maximum idle connections value to MySQL - connection configuration [GH-1635] - * secret/mysql: Use a combination of the role name and token display name in - generated user names and allow the length to be controlled [GH-1604] - * secret/{cassandra,mssql,mysql,postgresql}: SQL statements can now be passed - in via one of four ways: a semicolon-delimited string, a base64-delimited - string, a serialized JSON string array, or a base64-encoded serialized JSON - string array [GH-1686] - * secret/ssh: Added `allowed_roles` to vault-ssh-helper's config and returning - role name as part of response of `verify` API - * secret/ssh: Added passthrough of command line arguments to `ssh` [GH-1680] - * sys/health: Added version information to the response of health status - endpoint [GH-1647] - * sys/health: Cluster information is returned as part of health status when - Vault is unsealed [GH-1671] - * sys/mounts: MountTable data is compressed before serializing to accommodate - thousands of mounts [GH-1693] - * website: The [token - concepts](https://www.vaultproject.io/docs/concepts/tokens.html) page has - been completely rewritten [GH-1725] - -BUG FIXES: - - * auth/aws-ec2: Added a nil check for stored whitelist identity object - during renewal [GH-1542] - * auth/cert: Fix panic if no client certificate is supplied [GH-1637] - * auth/token: Don't report that a 
non-expiring root token is renewable, as - attempting to renew it results in an error [GH-1692] - * cli: Don't retry a command when a redirection is received [GH-1724] - * core: Fix regression causing status codes to be `400` in most non-5xx error - cases [GH-1553] - * core: Fix panic that could occur during a leadership transition [GH-1627] - * physical/postgres: Remove use of prepared statements as this causes - connection multiplexing software to break [GH-1548] - * physical/consul: Multiple Vault nodes on the same machine leading to check ID - collisions were resulting in incorrect health check responses [GH-1628] - * physical/consul: Fix deregistration of health checks on exit [GH-1678] - * secret/postgresql: Check for existence of role before attempting deletion - [GH-1575] - * secret/postgresql: Handle revoking roles that have privileges on sequences - [GH-1573] - * secret/postgresql(,mysql,mssql): Fix incorrect use of database over - transaction object which could lead to connection exhaustion [GH-1572] - * secret/pki: Fix parsing CA bundle containing trailing whitespace [GH-1634] - * secret/pki: Fix adding email addresses as SANs [GH-1688] - * secret/pki: Ensure that CRL values are always UTC, per RFC [GH-1727] - * sys/seal-status: Fixed nil Cluster object while checking seal status [GH-1715] - -## 0.6.0 (June 14th, 2016) - -SECURITY: - - * Although `sys/revoke-prefix` was intended to revoke prefixes of secrets (via - lease IDs, which incorporate path information) and - `auth/token/revoke-prefix` was intended to revoke prefixes of tokens (using - the tokens' paths and, since 0.5.2, role information), in implementation - they both behaved exactly the same way since a single component in Vault is - responsible for managing lifetimes of both, and the type of the tracked - lifetime was not being checked. The end result was that either endpoint - could revoke both secret leases and tokens. 
We consider this a very minor - security issue as there are a number of mitigating factors: both endpoints - require `sudo` capability in addition to write capability, preventing - blanket ACL path globs from providing access; both work by using the prefix - to revoke as a part of the endpoint path, allowing them to be properly - ACL'd; and both are intended for emergency scenarios and users should - already not generally have access to either one. In order to prevent - confusion, we have simply removed `auth/token/revoke-prefix` in 0.6, and - `sys/revoke-prefix` will be meant for both leases and tokens instead. - -DEPRECATIONS/BREAKING CHANGES: - - * `auth/token/revoke-prefix` has been removed. See the security notice for - details. [GH-1280] - * Vault will now automatically register itself as the `vault` service when - using the `consul` backend and will perform its own health checks. See - the Consul backend documentation for information on how to disable - auto-registration and service checks. - * List operations that do not find any keys now return a `404` status code - rather than an empty response object [GH-1365] - * CA certificates issued from the `pki` backend no longer have associated - leases, and any CA certs already issued will ignore revocation requests from - the lease manager. This is to prevent CA certificates from being revoked - when the token used to issue the certificate expires; it was not obvious - to users that they need to ensure that the token lifetime needed to be at - least as long as a potentially very long-lived CA cert. - -FEATURES: - - * **AWS EC2 Auth Backend**: Provides a secure introduction mechanism for AWS - EC2 instances allowing automated retrieval of Vault tokens. Unlike most - Vault authentication backends, this backend does not require first deploying - or provisioning security-sensitive credentials (tokens, username/password, - client certificates, etc). 
Instead, it treats AWS as a Trusted Third Party - and uses the cryptographically signed dynamic metadata information that - uniquely represents each EC2 instance. [Vault - Enterprise](https://www.hashicorp.com/vault.html) customers have access to a - turnkey client that speaks the backend API and makes access to a Vault token - easy. - * **Response Wrapping**: Nearly any response within Vault can now be wrapped - inside a single-use, time-limited token's cubbyhole, taking the [Cubbyhole - Authentication - Principles](https://www.hashicorp.com/blog/vault-cubbyhole-principles.html) - mechanism to its logical conclusion. Retrieving the original response is as - simple as a single API command or the new `vault unwrap` command. This makes - secret distribution easier and more secure, including secure introduction. - * **Azure Physical Backend**: You can now use Azure blob object storage as - your Vault physical data store [GH-1266] - * **Swift Physical Backend**: You can now use Swift blob object storage as - your Vault physical data store [GH-1425] - * **Consul Backend Health Checks**: The Consul backend will automatically - register a `vault` service and perform its own health checking. By default - the active node can be found at `active.vault.service.consul` and all - standby nodes are `standby.vault.service.consul`. Sealed vaults are marked - critical and are not listed by default in Consul's service discovery. See - the documentation for details. [GH-1349] - * **Explicit Maximum Token TTLs**: You can now set explicit maximum TTLs on - tokens that do not honor changes in the system- or mount-set values. This is - useful, for instance, when the max TTL of the system or the `auth/token` - mount must be set high to accommodate certain needs but you want more - granular restrictions on tokens being issued directly from the Token - authentication backend at `auth/token`. 
[GH-1399] - * **Non-Renewable Tokens**: When creating tokens directly through the token - authentication backend, you can now specify in both token store roles and - the API whether or not a token should be renewable, defaulting to `true`. - * **RabbitMQ Secret Backend**: Vault can now generate credentials for - RabbitMQ. Vhosts and tags can be defined within roles. [GH-788] - -IMPROVEMENTS: - - * audit: Add the DisplayName value to the copy of the Request object embedded - in the associated Response, to match the original Request object [GH-1387] - * audit: Enable auditing of the `seal` and `step-down` commands [GH-1435] - * backends: Remove most `root`/`sudo` paths in favor of normal ACL mechanisms. - A particular exception are any current MFA paths. A few paths in `token` and - `sys` also require `root` or `sudo`. [GH-1478] - * command/auth: Restore the previous authenticated token if the `auth` command - fails to authenticate the provided token [GH-1233] - * command/write: `-format` and `-field` can now be used with the `write` - command [GH-1228] - * core: Add `mlock` support for FreeBSD, OpenBSD, and Darwin [GH-1297] - * core: Don't keep lease timers around when tokens are revoked [GH-1277] - * core: If using the `disable_cache` option, caches for the policy store and - the `transit` backend are now disabled as well [GH-1346] - * credential/cert: Renewal requests are rejected if the set of policies has - changed since the token was issued [GH-477] - * credential/cert: Check CRLs for specific non-CA certs configured in the - backend [GH-1404] - * credential/ldap: If `groupdn` is not configured, skip searching LDAP and - only return policies for local groups, plus a warning [GH-1283] - * credential/ldap: `vault list` support for users and groups [GH-1270] - * credential/ldap: Support for the `memberOf` attribute for group membership - searching [GH-1245] - * credential/userpass: Add list support for users [GH-911] - * credential/userpass: Remove user 
configuration paths from requiring sudo, in - favor of normal ACL mechanisms [GH-1312] - * credential/token: Sanitize policies and add `default` policies in appropriate - places [GH-1235] - * credential/token: Setting the renewable status of a token is now possible - via `vault token-create` and the API. The default is true, but tokens can be - specified as non-renewable. [GH-1499] - * secret/aws: Use chain credentials to allow environment/EC2 instance/shared - providers [GH-307] - * secret/aws: Support for STS AssumeRole functionality [GH-1318] - * secret/consul: Reading consul access configuration supported. The response - will contain non-sensitive information only [GH-1445] - * secret/pki: Added `exclude_cn_from_sans` field to prevent adding the CN to - DNS or Email Subject Alternate Names [GH-1220] - * secret/pki: Added list support for certificates [GH-1466] - * sys/capabilities: Enforce ACL checks for requests that query the capabilities - of a token on a given path [GH-1221] - * sys/health: Status information can now be retrieved with `HEAD` [GH-1509] - -BUG FIXES: - - * command/read: Fix panic when using `-field` with a non-string value [GH-1308] - * command/token-lookup: Fix TTL showing as 0 depending on how a token was - created. This only affected the value shown at lookup, not the token - behavior itself. [GH-1306] - * command/various: Tell the JSON decoder to not convert all numbers to floats; - fixes some various places where numbers were showing up in scientific - notation - * command/server: Prioritized `devRootTokenID` and `devListenAddress` flags - over their respective env vars [GH-1480] - * command/ssh: Provided option to disable host key checking. The automated - variant of `vault ssh` command uses `sshpass` which was failing to handle - host key checking presented by the `ssh` binary. 
[GH-1473] - * core: Properly persist mount-tuned TTLs for auth backends [GH-1371] - * core: Don't accidentally crosswire SIGINT to the reload handler [GH-1372] - * credential/github: Make organization comparison case-insensitive during - login [GH-1359] - * credential/github: Fix panic when renewing a token created with some earlier - versions of Vault [GH-1510] - * credential/github: The token used to log in via `vault auth` can now be - specified in the `VAULT_AUTH_GITHUB_TOKEN` environment variable [GH-1511] - * credential/ldap: Fix problem where certain error conditions when configuring - or opening LDAP connections would cause a panic instead of return a useful - error message [GH-1262] - * credential/token: Fall back to normal parent-token semantics if - `allowed_policies` is empty for a role. Using `allowed_policies` of - `default` resulted in the same behavior anyways. [GH-1276] - * credential/token: Fix issues renewing tokens when using the "suffix" - capability of token roles [GH-1331] - * credential/token: Fix lookup via POST showing the request token instead of - the desired token [GH-1354] - * credential/various: Fix renewal conditions when `default` policy is not - contained in the backend config [GH-1256] - * physical/s3: Don't panic in certain error cases from bad S3 responses [GH-1353] - * secret/consul: Use non-pooled Consul API client to avoid leaving files open - [GH-1428] - * secret/pki: Don't check whether a certificate is destined to be a CA - certificate if sign-verbatim endpoint is used [GH-1250] - -## 0.5.3 (May 27th, 2016) - -SECURITY: - - * Consul ACL Token Revocation: An issue was reported to us indicating that - generated Consul ACL tokens were not being properly revoked. Upon - investigation, we found that this behavior was reproducible in a specific - scenario: when a generated lease for a Consul ACL token had been renewed - prior to revocation. 
In this case, the generated token was not being - properly persisted internally through the renewal function, leading to an - error during revocation due to the missing token. Unfortunately, this was - coded as a user error rather than an internal error, and the revocation - logic was expecting internal errors if revocation failed. As a result, the - revocation logic believed the revocation to have succeeded when it in fact - failed, causing the lease to be dropped while the token was still valid - within Consul. In this release, the Consul backend properly persists the - token through renewals, and the revocation logic has been changed to - consider any error type to have been a failure to revoke, causing the lease - to persist and attempt to be revoked later. - -We have written an example shell script that searches through Consul's ACL -tokens and looks for those generated by Vault, which can be used as a template -for a revocation script as deemed necessary for any particular security -response. The script is available at -https://gist.github.com/jefferai/6233c2963f9407a858d84f9c27d725c0 - -Please note that any outstanding leases for Consul tokens produced prior to -0.5.3 that have been renewed will continue to exhibit this behavior. As a -result, we recommend either revoking all tokens produced by the backend and -issuing new ones, or if needed, a more advanced variant of the provided example -could use the timestamp embedded in each generated token's name to decide which -tokens are too old and should be deleted. This could then be run periodically -up until the maximum lease time for any outstanding pre-0.5.3 tokens has -expired. - -This is a security-only release. There are no other code changes since 0.5.2. -The binaries have one additional change: they are built against Go 1.6.1 rather -than Go 1.6, as Go 1.6.1 contains two security fixes to the Go programming -language itself. 
- -## 0.5.2 (March 16th, 2016) - -FEATURES: - - * **MSSQL Backend**: Generate dynamic unique MSSQL database credentials based - on configured roles [GH-998] - * **Token Accessors**: Vault now provides an accessor with each issued token. - This accessor is an identifier that can be used for a limited set of - actions, notably for token revocation. This value can be logged in - plaintext to audit logs, and in combination with the plaintext metadata - logged to audit logs, provides a searchable and straightforward way to - revoke particular users' or services' tokens in many cases. To enable - plaintext audit logging of these accessors, set `hmac_accessor=false` when - enabling an audit backend. - * **Token Credential Backend Roles**: Roles can now be created in the `token` - credential backend that allow modifying token behavior in ways that are not - otherwise exposed or easily delegated. This allows creating tokens with a - fixed set (or subset) of policies (rather than a subset of the calling - token's), periodic tokens with a fixed TTL but no expiration, specified - prefixes, and orphans. - * **Listener Certificate Reloading**: Vault's configured listeners now reload - their TLS certificate and private key when the Vault process receives a - SIGHUP. 
- -IMPROVEMENTS: - - * auth/token: Endpoints optionally accept tokens from the HTTP body rather - than just from the URLs [GH-1211] - * auth/token,sys/capabilities: Added new endpoints - `auth/token/lookup-accessor`, `auth/token/revoke-accessor` and - `sys/capabilities-accessor`, which enables performing the respective actions - with just the accessor of the tokens, without having access to the actual - token [GH-1188] - * core: Ignore leading `/` in policy paths [GH-1170] - * core: Ignore leading `/` in mount paths [GH-1172] - * command/policy-write: Provided HCL is now validated for format violations - and provides helpful information around where the violation occurred - [GH-1200] - * command/server: The initial root token ID when running in `-dev` mode can - now be specified via `-dev-root-token-id` or the environment variable - `VAULT_DEV_ROOT_TOKEN_ID` [GH-1162] - * command/server: The listen address when running in `-dev` mode can now be - specified via `-dev-listen-address` or the environment variable - `VAULT_DEV_LISTEN_ADDRESS` [GH-1169] - * command/server: The configured listeners now reload their TLS - certificates/keys when Vault is SIGHUP'd [GH-1196] - * command/step-down: New `vault step-down` command and API endpoint to force - the targeted node to give up active status, but without sealing. The node - will wait ten seconds before attempting to grab the lock again. [GH-1146] - * command/token-renew: Allow no token to be passed in; use `renew-self` in - this case. Change the behavior for any token being passed in to use `renew`. - [GH-1150] - * credential/app-id: Allow `app-id` parameter to be given in the login path; - this causes the `app-id` to be part of the token path, making it easier to - use with `revoke-prefix` [GH-424] - * credential/cert: Non-CA certificates can be used for authentication. 
They - must be matched exactly (issuer and serial number) for authentication, and - the certificate must carry the client authentication or 'any' extended usage - attributes. [GH-1153] - * credential/cert: Subject and Authority key IDs are output in metadata; this - allows more flexible searching/revocation in the audit logs [GH-1183] - * credential/cert: Support listing configured certs [GH-1212] - * credential/userpass: Add support for `create`/`update` capability - distinction in user path, and add user-specific endpoints to allow changing - the password and policies [GH-1216] - * credential/token: Add roles [GH-1155] - * secret/mssql: Add MSSQL backend [GH-998] - * secret/pki: Add revocation time (zero or Unix epoch) to `pki/cert/SERIAL` - endpoint [GH-1180] - * secret/pki: Sanitize serial number in `pki/revoke` endpoint to allow some - other formats [GH-1187] - * secret/ssh: Added documentation for `ssh/config/zeroaddress` endpoint. - [GH-1154] - * sys: Added new endpoints `sys/capabilities` and `sys/capabilities-self` to - fetch the capabilities of a token on a given path [GH-1171] - * sys: Added `sys/revoke-force`, which enables a user to ignore backend errors - when revoking a lease, necessary in some emergency/failure scenarios - [GH-1168] - * sys: The return codes from `sys/health` can now be user-specified via query - parameters [GH-1199] - -BUG FIXES: - - * logical/cassandra: Apply hyphen/underscore replacement to the entire - generated username, not just the UUID, in order to handle token display name - hyphens [GH-1140] - * physical/etcd: Output actual error when cluster sync fails [GH-1141] - * vault/expiration: Not letting the error responses from the backends to skip - during renewals [GH-1176] - -## 0.5.1 (February 25th, 2016) - -DEPRECATIONS/BREAKING CHANGES: - - * RSA keys less than 2048 bits are no longer supported in the PKI backend. - 1024-bit keys are considered unsafe and are disallowed in the Internet PKI. 
- The `pki` backend has enforced SHA256 hashes in signatures from the - beginning, and software that can handle these hashes should be able to - handle larger key sizes. [GH-1095] - * The PKI backend now does not automatically delete expired certificates, - including from the CRL. Doing so could lead to a situation where a time - mismatch between the Vault server and clients could result in a certificate - that would not be considered expired by a client being removed from the CRL. - The new `pki/tidy` endpoint can be used to trigger expirations. [GH-1129] - * The `cert` backend now performs a variant of channel binding at renewal time - for increased security. In order to not overly burden clients, a notion of - identity is used. This functionality can be disabled. See the 0.5.1 upgrade - guide for more specific information [GH-1127] - -FEATURES: - - * **Codebase Audit**: Vault's 0.5 codebase was audited by iSEC. (The terms of - the audit contract do not allow us to make the results public.) 
[GH-220] - -IMPROVEMENTS: - - * api: The `VAULT_TLS_SERVER_NAME` environment variable can be used to control - the SNI header during TLS connections [GH-1131] - * api/health: Add the server's time in UTC to health responses [GH-1117] - * command/rekey and command/generate-root: These now return the status at - attempt initialization time, rather than requiring a separate fetch for the - nonce [GH-1054] - * credential/cert: Don't require root/sudo tokens for the `certs/` and `crls/` - paths; use normal ACL behavior instead [GH-468] - * credential/github: The validity of the token used for login will be checked - at renewal time [GH-1047] - * credential/github: The `config` endpoint no longer requires a root token; - normal ACL path matching applies - * deps: Use the standardized Go 1.6 vendoring system - * secret/aws: Inform users of AWS-imposed policy restrictions around STS - tokens if they attempt to use an invalid policy [GH-1113] - * secret/mysql: The MySQL backend now allows disabling verification of the - `connection_url` [GH-1096] - * secret/pki: Submitted CSRs are now verified to have the correct key type and - minimum number of bits according to the role. The exception is intermediate - CA signing and the `sign-verbatim` path [GH-1104] - * secret/pki: New `tidy` endpoint to allow expunging expired certificates. 
- [GH-1129] - * secret/postgresql: The PostgreSQL backend now allows disabling verification - of the `connection_url` [GH-1096] - * secret/ssh: When verifying an OTP, return 400 if it is not valid instead of - 204 [GH-1086] - * credential/app-id: App ID backend will check the validity of app-id and user-id - during renewal time [GH-1039] - * credential/cert: TLS Certificates backend, during renewal, will now match the - client identity with the client identity used during login [GH-1127] - -BUG FIXES: - - * credential/ldap: Properly escape values being provided to search filters - [GH-1100] - * secret/aws: Capping on length of usernames for both IAM and STS types - [GH-1102] - * secret/pki: If a cert is not found during lookup of a serial number, - respond with a 400 rather than a 500 [GH-1085] - * secret/postgresql: Add extra revocation statements to better handle more - permission scenarios [GH-1053] - * secret/postgresql: Make connection_url work properly [GH-1112] - -## 0.5.0 (February 10, 2016) - -SECURITY: - - * Previous versions of Vault could allow a malicious user to hijack the rekey - operation by canceling an operation in progress and starting a new one. The - practical application of this is very small. If the user was an unseal key - owner, they could attempt to do this in order to either receive unencrypted - reseal keys or to replace the PGP keys used for encryption with ones under - their control. However, since this would invalidate any rekey progress, they - would need other unseal key holders to resubmit, which would be rather - suspicious during this manual operation if they were not also the original - initiator of the rekey attempt. If the user was not an unseal key holder, - there is no benefit to be gained; the only outcome that could be attempted - would be a denial of service against a legitimate rekey operation by sending - cancel requests over and over. Thanks to Josh Snyder for the report! 
- -DEPRECATIONS/BREAKING CHANGES: - - * `s3` physical backend: Environment variables are now preferred over - configuration values. This makes it behave similar to the rest of Vault, - which, in increasing order of preference, uses values from the configuration - file, environment variables, and CLI flags. [GH-871] - * `etcd` physical backend: `sync` functionality is now supported and turned on - by default. This can be disabled. [GH-921] - * `transit`: If a client attempts to encrypt a value with a key that does not - yet exist, what happens now depends on the capabilities set in the client's - ACL policies. If the client has `create` (or `create` and `update`) - capability, the key will upsert as in the past. If the client has `update` - capability, they will receive an error. [GH-1012] - * `token-renew` CLI command: If the token given for renewal is the same as the - client token, the `renew-self` endpoint will be used in the API. Given that - the `default` policy (by default) allows all clients access to the - `renew-self` endpoint, this makes it much more likely that the intended - operation will be successful. [GH-894] - * Token `lookup`: the `ttl` value in the response now reflects the actual - remaining TTL rather than the original TTL specified when the token was - created; this value is now located in `creation_ttl` [GH-986] - * Vault no longer uses grace periods on leases or token TTLs. Uncertainty - about the length grace period for any given backend could cause confusion - and uncertainty. [GH-1002] - * `rekey`: Rekey now requires a nonce to be supplied with key shares. This - nonce is generated at the start of a rekey attempt and is unique for that - attempt. - * `status`: The exit code for the `status` CLI command is now `2` for an - uninitialized Vault instead of `1`. `1` is returned for errors. This better - matches the rest of the CLI. 
- -FEATURES: - - * **Split Data/High Availability Physical Backends**: You can now configure - two separate physical backends: one to be used for High Availability - coordination and another to be used for encrypted data storage. See the - [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-395] - * **Fine-Grained Access Control**: Policies can now use the `capabilities` set - to specify fine-grained control over operations allowed on a path, including - separation of `sudo` privileges from other privileges. These can be mixed - and matched in any way desired. The `policy` value is kept for backwards - compatibility. See the [updated policy - documentation](https://vaultproject.io/docs/concepts/policies.html) for - details. [GH-914] - * **List Support**: Listing is now supported via the API and the new `vault - list` command. This currently supports listing keys in the `generic` and - `cubbyhole` backends and a few other places (noted in the IMPROVEMENTS - section below). Different parts of the API and backends will need to - implement list capabilities in ways that make sense to particular endpoints, - so further support will appear over time. [GH-617] - * **Root Token Generation via Unseal Keys**: You can now use the - `generate-root` CLI command to generate new orphaned, non-expiring root - tokens in case the original is lost or revoked (accidentally or - purposefully). This requires a quorum of unseal key holders. The output - value is protected via any PGP key of the initiator's choosing or a one-time - pad known only to the initiator (a suitable pad can be generated via the - `-genotp` flag to the command. [GH-915] - * **Unseal Key Archiving**: You can now optionally have Vault store your - unseal keys in your chosen physical store for disaster recovery purposes. - This option is only available when the keys are encrypted with PGP. 
[GH-907] - * **Keybase Support for PGP Encryption Keys**: You can now specify Keybase - users when passing in PGP keys to the `init`, `rekey`, and `generate-root` - CLI commands. Public keys for these users will be fetched automatically. - [GH-901] - * **DynamoDB HA Physical Backend**: There is now a new, community-supported - HA-enabled physical backend using Amazon DynamoDB. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-878] - * **PostgreSQL Physical Backend**: There is now a new, community-supported - physical backend using PostgreSQL. See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - [GH-945] - * **STS Support in AWS Secret Backend**: You can now use the AWS secret - backend to fetch STS tokens rather than IAM users. [GH-927] - * **Speedups in the transit backend**: The `transit` backend has gained a - cache, and now loads only the working set of keys (e.g. from the - `min_decryption_version` to the current key version) into its working set. - This provides large speedups and potential memory savings when the `rotate` - feature of the backend is used heavily. 
- -IMPROVEMENTS: - - * cli: Output secrets sorted by key name [GH-830] - * cli: Support YAML as an output format [GH-832] - * cli: Show an error if the output format is incorrect, rather than falling - back to an empty table [GH-849] - * cli: Allow setting the `advertise_addr` for HA via the - `VAULT_ADVERTISE_ADDR` environment variable [GH-581] - * cli/generate-root: Add generate-root and associated functionality [GH-915] - * cli/init: Add `-check` flag that returns whether Vault is initialized - [GH-949] - * cli/server: Use internal functions for the token-helper rather than shelling - out, which fixes some problems with using a static binary in Docker or paths - with multiple spaces when launching in `-dev` mode [GH-850] - * cli/token-lookup: Add token-lookup command [GH-892] - * command/{init,rekey}: Allow ASCII-armored keychain files to be arguments for - `-pgp-keys` [GH-940] - * conf: Use normal bool values rather than empty/non-empty for the - `tls_disable` option [GH-802] - * credential/ldap: Add support for binding, both anonymously (to discover a - user DN) and via a username and password [GH-975] - * credential/token: Add `last_renewal_time` to token lookup calls [GH-896] - * credential/token: Change `ttl` to reflect the current remaining TTL; the - original value is in `creation_ttl` [GH-1007] - * helper/certutil: Add ability to parse PKCS#8 bundles [GH-829] - * logical/aws: You can now get STS tokens instead of IAM users [GH-927] - * logical/cassandra: Add `protocol_version` parameter to set the CQL proto - version [GH-1005] - * logical/cubbyhole: Add cubbyhole access to default policy [GH-936] - * logical/mysql: Add list support for roles path [GH-984] - * logical/pki: Fix up key usages being specified for CAs [GH-989] - * logical/pki: Add list support for roles path [GH-985] - * logical/pki: Allow `pem_bundle` to be specified as the format, which - provides a concatenated PEM bundle of returned values [GH-1008] - * logical/pki: Add 30 seconds of 
slack to the validity start period to - accommodate some clock skew in machines [GH-1036] - * logical/postgres: Add `max_idle_connections` parameter [GH-950] - * logical/postgres: Add list support for roles path - * logical/ssh: Add list support for roles path [GH-983] - * logical/transit: Keys are archived and only keys between the latest version - and `min_decryption_version` are loaded into the working set. This can - provide a very large speed increase when rotating keys very often. [GH-977] - * logical/transit: Keys are now cached, which should provide a large speedup - in most cases [GH-979] - * physical/cache: Use 2Q cache instead of straight LRU [GH-908] - * physical/etcd: Support basic auth [GH-859] - * physical/etcd: Support sync functionality and enable by default [GH-921] - -BUG FIXES: - - * api: Correct the HTTP verb used in the LookupSelf method [GH-887] - * api: Fix the output of `Sys().MountConfig(...)` to return proper values - [GH-1017] - * command/read: Fix panic when an empty argument was given [GH-923] - * command/ssh: Fix panic when username lookup fails [GH-886] - * core: When running in standalone mode, don't advertise that we are active - until post-unseal setup completes [GH-872] - * core: Update go-cleanhttp dependency to ensure idle connections aren't - leaked [GH-867] - * core: Don't allow tokens to have duplicate policies [GH-897] - * core: Fix regression in `sys/renew` that caused information stored in the - Secret part of the response to be lost [GH-912] - * physical: Use square brackets when setting an IPv6-based advertise address - as the auto-detected advertise address [GH-883] - * physical/s3: Use an initialized client when using IAM roles to fix a - regression introduced against newer versions of the AWS Go SDK [GH-836] - * secret/pki: Fix a condition where unmounting could fail if the CA - certificate was not properly loaded [GH-946] - * secret/ssh: Fix a problem where SSH connections were not always closed - properly [GH-942] 
- -MISC: - - * Clarified our stance on support for community-derived physical backends. - See the [configuration - documentation](https://vaultproject.io/docs/config/index.html) for details. - * Add `vault-java` to libraries [GH-851] - * Various minor documentation fixes and improvements [GH-839] [GH-854] - [GH-861] [GH-876] [GH-899] [GH-900] [GH-904] [GH-923] [GH-924] [GH-958] - [GH-959] [GH-981] [GH-990] [GH-1024] [GH-1025] - -BUILD NOTE: - - * The HashiCorp-provided binary release of Vault 0.5.0 is built against a - patched version of Go 1.5.3 containing two specific bug fixes affecting TLS - certificate handling. These fixes are in the Go 1.6 tree and were - cherry-picked on top of stock Go 1.5.3. If you want to examine the way in - which the releases were built, please look at our [cross-compilation - Dockerfile](https://github.com/hashicorp/vault/blob/v0.5.0/scripts/cross/Dockerfile-patched-1.5.3). - -## 0.4.1 (January 13, 2016) - -SECURITY: - - * Build against Go 1.5.3 to mitigate a security vulnerability introduced in - Go 1.5. For more information, please see - https://groups.google.com/forum/#!topic/golang-dev/MEATuOi_ei4 - -This is a security-only release; other than the version number and building -against Go 1.5.3, there are no changes from 0.4.0. - -## 0.4.0 (December 10, 2015) - -DEPRECATIONS/BREAKING CHANGES: - - * Policy Name Casing: Policy names are now normalized to lower-case on write, - helping prevent accidental case mismatches. For backwards compatibility, - policy names are not currently normalized when reading or deleting. [GH-676] - * Default etcd port number: the default connection string for the `etcd` - physical store uses port 2379 instead of port 4001, which is the port used - by the supported version 2.x of etcd. [GH-753] - * As noted below in the FEATURES section, if your Vault installation contains - a policy called `default`, new tokens created will inherit this policy - automatically. 
- * In the PKI backend there have been a few minor breaking changes: - * The token display name is no longer a valid option for providing a base - domain for issuance. Since this name is prepended with the name of the - authentication backend that issued it, it provided a faulty use-case at best - and a confusing experience at worst. We hope to figure out a better - per-token value in a future release. - * The `allowed_base_domain` parameter has been changed to `allowed_domains`, - which accepts a comma-separated list of domains. This allows issuing - certificates with DNS subjects across multiple domains. If you had a - configured `allowed_base_domain` parameter, it will be migrated - automatically when the role is read (either via a normal read, or via - issuing a certificate). - -FEATURES: - - * **Significantly Enhanced PKI Backend**: The `pki` backend can now generate - and sign root CA certificates and intermediate CA CSRs. It can also now sign - submitted client CSRs, as well as a significant number of other - enhancements. See the updated documentation for the full API. [GH-666] - * **CRL Checking for Certificate Authentication**: The `cert` backend now - supports pushing CRLs into the mount and using the contained serial numbers - for revocation checking. See the documentation for the `cert` backend for - more info. [GH-330] - * **Default Policy**: Vault now ensures that a policy named `default` is added - to every token. This policy cannot be deleted, but it can be modified - (including to an empty policy). There are three endpoints allowed in the - default `default` policy, related to token self-management: `lookup-self`, - which allows a token to retrieve its own information, and `revoke-self` and - `renew-self`, which are self-explanatory. If your existing Vault - installation contains a policy called `default`, it will not be overridden, - but it will be added to each new token created. 
You can override this - behavior when using manual token creation (i.e. not via an authentication - backend) by setting the "no_default_policy" flag to true. [GH-732] - -IMPROVEMENTS: - - * api: API client now uses a 60 second timeout instead of indefinite [GH-681] - * api: Implement LookupSelf, RenewSelf, and RevokeSelf functions for auth - tokens [GH-739] - * api: Standardize environment variable reading logic inside the API; the CLI - now uses this but can still override via command-line parameters [GH-618] - * audit: HMAC-SHA256'd client tokens are now stored with each request entry. - Previously they were only displayed at creation time; this allows much - better traceability of client actions. [GH-713] - * audit: There is now a `sys/audit-hash` endpoint that can be used to generate - an HMAC-SHA256'd value from provided data using the given audit backend's - salt [GH-784] - * core: The physical storage read cache can now be disabled via - "disable_cache" [GH-674] - * core: The unsealing process can now be reset midway through (this feature - was documented before, but not enabled) [GH-695] - * core: Tokens can now renew themselves [GH-455] - * core: Base64-encoded PGP keys can be used with the CLI for `init` and - `rekey` operations [GH-653] - * core: Print version on startup [GH-765] - * core: Access to `sys/policy` and `sys/mounts` now uses the normal ACL system - instead of requiring a root token [GH-769] - * credential/token: Display whether or not a token is an orphan in the output - of a lookup call [GH-766] - * logical: Allow `.` in path-based variables in many more locations [GH-244] - * logical: Responses now contain a "warnings" key containing a list of - warnings returned from the server. These are conditions that did not require - failing an operation, but of which the client should be aware. 
[GH-676] - * physical/(consul,etcd): Consul and etcd now use a connection pool to limit - the number of outstanding operations, improving behavior when a lot of - operations must happen at once [GH-677] [GH-780] - * physical/consul: The `datacenter` parameter was removed; It could not be - effective unless the Vault node (or the Consul node it was connecting to) - was in the datacenter specified, in which case it wasn't needed [GH-816] - * physical/etcd: Support TLS-encrypted connections and use a connection pool - to limit the number of outstanding operations [GH-780] - * physical/s3: The S3 endpoint can now be configured, allowing using - S3-API-compatible storage solutions [GH-750] - * physical/s3: The S3 bucket can now be configured with the `AWS_S3_BUCKET` - environment variable [GH-758] - * secret/consul: Management tokens can now be created [GH-714] - -BUG FIXES: - - * api: API client now checks for a 301 response for redirects. Vault doesn't - generate these, but in certain conditions Go's internal HTTP handler can - generate them, leading to client errors. - * cli: `token-create` now supports the `ttl` parameter in addition to the - deprecated `lease` parameter. [GH-688] - * core: Return data from `generic` backends on the last use of a limited-use - token [GH-615] - * core: Fix upgrade path for leases created in `generic` prior to 0.3 [GH-673] - * core: Stale leader entries will now be reaped [GH-679] - * core: Using `mount-tune` on the auth/token path did not take effect. 
- [GH-688] - * core: Fix a potential race condition when (un)sealing the vault with metrics - enabled [GH-694] - * core: Fix an error that could happen in some failure scenarios where Vault - could fail to revert to a clean state [GH-733] - * core: Ensure secondary indexes are removed when a lease is expired [GH-749] - * core: Ensure rollback manager uses an up-to-date mounts table [GH-771] - * everywhere: Don't use http.DefaultClient, as it shares state implicitly and - is a source of hard-to-track-down bugs [GH-700] - * credential/token: Allow creating orphan tokens via an API path [GH-748] - * secret/generic: Validate given duration at write time, not just read time; - if stored durations are not parseable, return a warning and the default - duration rather than an error [GH-718] - * secret/generic: Return 400 instead of 500 when `generic` backend is written - to with no data fields [GH-825] - * secret/postgresql: Revoke permissions before dropping a user or revocation - may fail [GH-699] - -MISC: - - * Various documentation fixes and improvements [GH-685] [GH-688] [GH-697] - [GH-710] [GH-715] [GH-831] - -## 0.3.1 (October 6, 2015) - -SECURITY: - - * core: In certain failure scenarios, the full values of requests and - responses would be logged [GH-665] - -FEATURES: - - * **Settable Maximum Open Connections**: The `mysql` and `postgresql` backends - now allow setting the number of maximum open connections to the database, - which was previously capped to 2. [GH-661] - * **Renewable Tokens for GitHub**: The `github` backend now supports - specifying a TTL, enabling renewable tokens. 
[GH-664] - -BUG FIXES: - - * dist: linux-amd64 distribution was dynamically linked [GH-656] - * credential/github: Fix acceptance tests [GH-651] - -MISC: - - * Various minor documentation fixes and improvements [GH-649] [GH-650] - [GH-654] [GH-663] - -## 0.3.0 (September 28, 2015) - -DEPRECATIONS/BREAKING CHANGES: - -Note: deprecations and breaking changes in upcoming releases are announced -ahead of time on the "vault-tool" mailing list. - - * **Cookie Authentication Removed**: As of 0.3 the only way to authenticate is - via the X-Vault-Token header. Cookie authentication was hard to properly - test, could result in browsers/tools/applications saving tokens in plaintext - on disk, and other issues. [GH-564] - * **Terminology/Field Names**: Vault is transitioning from overloading the - term "lease" to mean both "a set of metadata" and "the amount of time the - metadata is valid". The latter is now being referred to as TTL (or - "lease_duration" for backwards-compatibility); some parts of Vault have - already switched to using "ttl" and others will follow in upcoming releases. - In particular, the "token", "generic", and "pki" backends accept both "ttl" - and "lease" but in 0.4 only "ttl" will be accepted. [GH-528] - * **Downgrade Not Supported**: Due to enhancements in the storage subsystem, - values written by Vault 0.3+ will not be able to be read by prior versions - of Vault. There are no expected upgrade issues, however, as with all - critical infrastructure it is recommended to back up Vault's physical - storage before upgrading. - -FEATURES: - - * **SSH Backend**: Vault can now be used to delegate SSH access to machines, - via a (recommended) One-Time Password approach or by issuing dynamic keys. - [GH-385] - * **Cubbyhole Backend**: This backend works similarly to the "generic" backend - but provides a per-token workspace. This enables some additional - authentication workflows (especially for containers) and can be useful to - applications to e.g. 
store local credentials while being restarted or - upgraded, rather than persisting to disk. [GH-612] - * **Transit Backend Improvements**: The transit backend now allows key - rotation and datakey generation. For rotation, data encrypted with previous - versions of the keys can still be decrypted, down to a (configurable) - minimum previous version; there is a rewrap function for manual upgrades of - ciphertext to newer versions. Additionally, the backend now allows - generating and returning high-entropy keys of a configurable bitsize - suitable for AES and other functions; this is returned wrapped by a named - key, or optionally both wrapped and plaintext for immediate use. [GH-626] - * **Global and Per-Mount Default/Max TTL Support**: You can now set the - default and maximum Time To Live for leases both globally and per-mount. - Per-mount settings override global settings. Not all backends honor these - settings yet, but the maximum is a hard limit enforced outside the backend. - See the documentation for "/sys/mounts/" for details on configuring - per-mount TTLs. [GH-469] - * **PGP Encryption for Unseal Keys**: When initializing or rotating Vault's - master key, PGP/GPG public keys can now be provided. The output keys will be - encrypted with the given keys, in order. [GH-570] - * **Duo Multifactor Authentication Support**: Backends that support MFA can - now use Duo as the mechanism. [GH-464] - * **Performance Improvements**: Users of the "generic" backend will see a - significant performance improvement as the backend no longer creates leases, - although it does return TTLs (global/mount default, or set per-item) as - before. [GH-631] - * **Codebase Audit**: Vault's codebase was audited by iSEC. (The terms of the - audit contract do not allow us to make the results public.) 
[GH-220] - -IMPROVEMENTS: - - * audit: Log entries now contain a time field [GH-495] - * audit: Obfuscated audit entries now use hmac-sha256 instead of sha1 [GH-627] - * backends: Add ability for a cleanup function to be called on backend unmount - [GH-608] - * config: Allow specifying minimum acceptable TLS version [GH-447] - * core: If trying to mount in a location that is already mounted, be more - helpful about the error [GH-510] - * core: Be more explicit on failure if the issue is invalid JSON [GH-553] - * core: Tokens can now revoke themselves [GH-620] - * credential/app-id: Give a more specific error when sending a duplicate POST - to sys/auth/app-id [GH-392] - * credential/github: Support custom API endpoints (e.g. for Github Enterprise) - [GH-572] - * credential/ldap: Add per-user policies and option to login with - userPrincipalName [GH-420] - * credential/token: Allow root tokens to specify the ID of a token being - created from CLI [GH-502] - * credential/userpass: Enable renewals for login tokens [GH-623] - * scripts: Use /usr/bin/env to find Bash instead of hardcoding [GH-446] - * scripts: Use godep for build scripts to use same environment as tests - [GH-404] - * secret/mysql: Allow reading configuration data [GH-529] - * secret/pki: Split "allow_any_name" logic to that and "enforce_hostnames", to - allow for non-hostname values (e.g. 
for client certificates) [GH-555] - * storage/consul: Allow specifying certificates used to talk to Consul - [GH-384] - * storage/mysql: Allow SSL encrypted connections [GH-439] - * storage/s3: Allow using temporary security credentials [GH-433] - * telemetry: Put telemetry object in configuration to allow more flexibility - [GH-419] - * testing: Disable mlock for testing of logical backends so as not to require - root [GH-479] - -BUG FIXES: - - * audit/file: Do not enable auditing if file permissions are invalid [GH-550] - * backends: Allow hyphens in endpoint patterns (fixes AWS and others) [GH-559] - * cli: Fixed missing setup of client TLS certificates if no custom CA was - provided - * cli/read: Do not include a carriage return when using raw field output - [GH-624] - * core: Bad input data could lead to a panic for that session, rather than - returning an error [GH-503] - * core: Allow SHA2-384/SHA2-512 hashed certificates [GH-448] - * core: Do not return a Secret if there are no uses left on a token (since it - will be unable to be used) [GH-615] - * core: Code paths that called lookup-self would decrement num_uses and - potentially immediately revoke a token [GH-552] - * core: Some /sys/ paths would not properly redirect from a standby to the - leader [GH-499] [GH-551] - * credential/aws: Translate spaces in a token's display name to avoid making - IAM unhappy [GH-567] - * credential/github: Integration failed if more than ten organizations or - teams [GH-489] - * credential/token: Tokens with sudo access to "auth/token/create" can now use - root-only options [GH-629] - * secret/cassandra: Work around backwards-incompatible change made in - Cassandra 2.2 preventing Vault from properly setting/revoking leases - [GH-549] - * secret/mysql: Use varbinary instead of varchar to avoid InnoDB/UTF-8 issues - [GH-522] - * secret/postgres: Explicitly set timezone in connections [GH-597] - * storage/etcd: Renew semaphore periodically to prevent leadership flapping - 
[GH-606] - * storage/zk: Fix collisions in storage that could lead to data unavailability - [GH-411] - -MISC: - - * Various documentation fixes and improvements [GH-412] [GH-474] [GH-476] - [GH-482] [GH-483] [GH-486] [GH-508] [GH-568] [GH-574] [GH-586] [GH-590] - [GH-591] [GH-592] [GH-595] [GH-613] [GH-637] - * Less "armon" in stack traces [GH-453] - * Sourcegraph integration [GH-456] - -## 0.2.0 (July 13, 2015) - -FEATURES: - - * **Key Rotation Support**: The `rotate` command can be used to rotate the - master encryption key used to write data to the storage (physical) backend. - [GH-277] - * **Rekey Support**: Rekey can be used to rotate the master key and change the - configuration of the unseal keys (number of shares, threshold required). - [GH-277] - * **New secret backend: `pki`**: Enable Vault to be a certificate authority - and generate signed TLS certificates. [GH-310] - * **New secret backend: `cassandra`**: Generate dynamic credentials for - Cassandra [GH-363] - * **New storage backend: `etcd`**: store physical data in etcd [GH-259] - [GH-297] - * **New storage backend: `s3`**: store physical data in S3. Does not support - HA. [GH-242] - * **New storage backend: `MySQL`**: store physical data in MySQL. Does not - support HA. [GH-324] - * `transit` secret backend supports derived keys for per-transaction unique - keys [GH-399] - -IMPROVEMENTS: - - * cli/auth: Enable `cert` method [GH-380] - * cli/auth: read input from stdin [GH-250] - * cli/read: Ability to read a single field from a secret [GH-257] - * cli/write: Adding a force flag when no input required - * core: allow time duration format in place of seconds for some inputs - * core: audit log provides more useful information [GH-360] - * core: graceful shutdown for faster HA failover - * core: **change policy format** to use explicit globbing [GH-400] Any - existing policy in Vault is automatically upgraded to avoid issues. All - policy files must be updated for future writes. 
Adding the explicit glob - character `*` to the path specification is all that is required. - * core: policy merging to give deny highest precedence [GH-400] - * credential/app-id: Protect against timing attack on app-id - * credential/cert: Record the common name in the metadata [GH-342] - * credential/ldap: Allow TLS verification to be disabled [GH-372] - * credential/ldap: More flexible names allowed [GH-245] [GH-379] [GH-367] - * credential/userpass: Protect against timing attack on password - * credential/userpass: Use bcrypt for password matching - * http: response codes improved to reflect error [GH-366] - * http: the `sys/health` endpoint supports `?standbyok` to return 200 on - standby [GH-389] - * secret/app-id: Support deleting AppID and UserIDs [GH-200] - * secret/consul: Fine grained lease control [GH-261] - * secret/transit: Decouple raw key from key management endpoint [GH-355] - * secret/transit: Upsert named key when encrypt is used [GH-355] - * storage/zk: Support for HA configuration [GH-252] - * storage/zk: Changing node representation. **Backwards incompatible**. 
- [GH-416] - -BUG FIXES: - - * audit/file: file removing TLS connection state - * audit/syslog: fix removing TLS connection state - * command/*: commands accepting `k=v` allow blank values - * core: Allow building on FreeBSD [GH-365] - * core: Fixed various panics when audit logging enabled - * core: Lease renewal does not create redundant lease - * core: fixed leases with negative duration [GH-354] - * core: token renewal does not create child token - * core: fixing panic when lease increment is null [GH-408] - * credential/app-id: Salt the paths in storage backend to avoid information - leak - * credential/cert: Fixing client certificate not being requested - * credential/cert: Fixing panic when no certificate match found [GH-361] - * http: Accept PUT as POST for sys/auth - * http: Accept PUT as POST for sys/mounts [GH-349] - * http: Return 503 when sealed [GH-225] - * secret/postgres: Username length is capped to exceeding limit - * server: Do not panic if backend not configured [GH-222] - * server: Explicitly check value of tls_diable [GH-201] - * storage/zk: Fixed issues with version conflicts [GH-190] - -MISC: - - * cli/path-help: renamed from `help` to avoid confusion - -## 0.1.2 (May 11, 2015) - -FEATURES: - - * **New physical backend: `zookeeper`**: store physical data in Zookeeper. - HA not supported yet. - * **New credential backend: `ldap`**: authenticate using LDAP credentials. 
- -IMPROVEMENTS: - - * core: Auth backends can store internal data about auth creds - * audit: display name for auth is shown in logs [GH-176] - * command/*: `-insecure` has been renamed to `-tls-skip-verify` [GH-130] - * command/*: `VAULT_TOKEN` overrides local stored auth [GH-162] - * command/server: environment variables are copy-pastable - * credential/app-id: hash of app and user ID are in metadata [GH-176] - * http: HTTP API accepts `X-Vault-Token` as auth header [GH-124] - * logical/*: Generate help output even if no synopsis specified - -BUG FIXES: - - * core: login endpoints should never return secrets - * core: Internal data should never be returned from core endpoints - * core: defer barrier initialization to as late as possible to avoid error - cases during init that corrupt data (no data loss) - * core: guard against invalid init config earlier - * audit/file: create file if it doesn't exist [GH-148] - * command/*: ignore directories when traversing CA paths [GH-181] - * credential/*: all policy mapping keys are case insensitive [GH-163] - * physical/consul: Fixing path for locking so HA works in every case - -## 0.1.1 (May 2, 2015) - -SECURITY CHANGES: - - * physical/file: create the storge with 0600 permissions [GH-102] - * token/disk: write the token to disk with 0600 perms - -IMPROVEMENTS: - - * core: Very verbose error if mlock fails [GH-59] - * command/*: On error with TLS oversized record, show more human-friendly - error message. 
[GH-123] - * command/read: `lease_renewable` is now outputted along with the secret to - show whether it is renewable or not - * command/server: Add configuration option to disable mlock - * command/server: Disable mlock for dev mode so it works on more systems - -BUG FIXES: - - * core: if token helper isn't absolute, prepend with path to Vault - executable, not "vault" (which requires PATH) [GH-60] - * core: Any "mapping" routes allow hyphens in keys [GH-119] - * core: Validate `advertise_addr` is a valid URL with scheme [GH-106] - * command/auth: Using an invalid token won't crash [GH-75] - * credential/app-id: app and user IDs can have hyphens in keys [GH-119] - * helper/password: import proper DLL for Windows to ask password [GH-83] - -## 0.1.0 (April 28, 2015) - - * Initial release diff --git a/vendor/github.com/hashicorp/vault/CONTRIBUTING.md b/vendor/github.com/hashicorp/vault/CONTRIBUTING.md deleted file mode 100644 index 6fc1888bf92a..000000000000 --- a/vendor/github.com/hashicorp/vault/CONTRIBUTING.md +++ /dev/null @@ -1,72 +0,0 @@ -# Contributing to Vault - -**Please note:** We take Vault's security and our users' trust very seriously. -If you believe you have found a security issue in Vault, please responsibly -disclose by contacting us at security@hashicorp.com. - -**First:** if you're unsure or afraid of _anything_, just ask or submit the -issue or pull request anyways. You won't be yelled at for giving it your best -effort. The worst that can happen is that you'll be politely asked to change -something. We appreciate any sort of contributions, and don't want a wall of -rules to get in the way of that. - -That said, if you want to ensure that a pull request is likely to be merged, -talk to us! You can find out our thoughts and ensure that your contribution -won't clash or be obviated by Vault's normal direction. A great way to do this -is via the [Vault Google Group][2]. Sometimes Vault devs are in `#vault-tool` -on Freenode, too. 
- -This document will cover what we're looking for in terms of reporting issues. -By addressing all the points we're looking for, it raises the chances we can -quickly merge or address your contributions. - -## Issues - -### Reporting an Issue - -* Make sure you test against the latest released version. It is possible - we already fixed the bug you're experiencing. Even better is if you can test - against `master`, as bugs are fixed regularly but new versions are only - released every few months. - -* Provide steps to reproduce the issue, and if possible include the expected - results as well as the actual results. Please provide text, not screen shots! - -* If you are seeing an internal Vault error (a status code of 5xx), please be - sure to post relevant parts of (or the entire) Vault log, as often these - errors are logged on the server but not reported to the user - -* If you experienced a panic, please create a [gist](https://gist.github.com) - of the *entire* generated crash log for us to look at. Double check - no sensitive items were in the log. - -* Respond as promptly as possible to any questions made by the Vault - team to your issue. Stale issues will be closed periodically. - -### Issue Lifecycle - -1. The issue is reported. - -2. The issue is verified and categorized by a Vault collaborator. - Categorization is done via tags. For example, bugs are marked as "bugs". - -3. Unless it is critical, the issue may be left for a period of time (sometimes - many weeks), giving outside contributors -- maybe you!? -- a chance to - address the issue. - -4. The issue is addressed in a pull request or commit. The issue will be - referenced in the commit message so that the code that fixes it is clearly - linked. - -5. The issue is closed. Sometimes, valid issues will be closed to keep - the issue tracker clean. The issue is still indexed and available for - future viewers, or can be re-opened if necessary. 
- -## Setting up Go to work on Vault - -If you have never worked with Go before, you will have to complete the -following steps listed in the README, under the section [Developing Vault][1]. - - -[1]: https://github.com/hashicorp/vault#developing-vault -[2]: https://groups.google.com/group/vault-tool diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE deleted file mode 100644 index e87a115e462e..000000000000 --- a/vendor/github.com/hashicorp/vault/LICENSE +++ /dev/null @@ -1,363 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. "Contributor" - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. "Contributor Version" - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. - -1.3. "Contribution" - - means Covered Software of a particular Contributor. - -1.4. "Covered Software" - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. "Incompatible With Secondary Licenses" - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. - -1.6. "Executable Form" - - means any form of the work other than Source Code Form. - -1.7. "Larger Work" - - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. - -1.8. "License" - - means this document. - -1.9. 
"Licensable" - - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. - -1.10. "Modifications" - - means any of the following: - - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. "Patent Claims" of a Contributor - - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. - -1.12. "Secondary License" - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. "Source Code Form" - - means the form of the work preferred for making modifications. - -1.14. "You" (or "Your") - - means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party's - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. - - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. 
Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. 
Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - "Incompatible With Secondary Licenses" Notice - - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by - the Mozilla Public License, v. 2.0. - diff --git a/vendor/github.com/hashicorp/vault/Makefile b/vendor/github.com/hashicorp/vault/Makefile deleted file mode 100644 index 720e8637f5ec..000000000000 --- a/vendor/github.com/hashicorp/vault/Makefile +++ /dev/null @@ -1,63 +0,0 @@ -TEST?=$$(go list ./... | grep -v /vendor/) -VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr -EXTERNAL_TOOLS=\ - github.com/mitchellh/gox -BUILD_TAGS?=vault - -default: dev - -# bin generates the releaseable binaries for Vault -bin: generate - @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/build.sh'" - -# dev creates binaries for testing Vault locally. These are put -# into ./bin/ as well as $GOPATH/bin -dev: generate - @CGO_ENABLED=0 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" - -dev-dynamic: generate - @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" - -# test runs the unit tests and vets the code -test: generate - CGO_ENABLED=0 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=10m -parallel=4 - -# testacc runs acceptance tests -testacc: generate - @if [ "$(TEST)" = "./..." 
]; then \ - echo "ERROR: Set TEST to a specific package"; \ - exit 1; \ - fi - VAULT_ACC=1 go test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout 45m - -# testrace runs the race checker -testrace: generate - CGO_ENABLED=1 VAULT_TOKEN= VAULT_ACC= go test -tags='$(BUILD_TAGS)' -race $(TEST) $(TESTARGS) -timeout=20m -parallel=4 - -cover: - ./scripts/coverage.sh --html - -# vet runs the Go source code static analysis tool `vet` to find -# any common errors. -vet: - @go list -f '{{.Dir}}' ./... | grep -v /vendor/ \ - | grep -v '.*github.com/hashicorp/vault$$' \ - | xargs go tool vet ; if [ $$? -eq 1 ]; then \ - echo ""; \ - echo "Vet found suspicious constructs. Please check the reported constructs"; \ - echo "and fix them if necessary before submitting the code for reviewal."; \ - fi - -# generate runs `go generate` to build the dynamically generated -# source files. -generate: - go generate $(go list ./... | grep -v /vendor/) - -# bootstrap the build by downloading additional tools -bootstrap: - @for tool in $(EXTERNAL_TOOLS) ; do \ - echo "Installing $$tool" ; \ - go get $$tool; \ - done - -.PHONY: bin default generate test vet bootstrap diff --git a/vendor/github.com/hashicorp/vault/README.md b/vendor/github.com/hashicorp/vault/README.md deleted file mode 100644 index b96fd4f4db33..000000000000 --- a/vendor/github.com/hashicorp/vault/README.md +++ /dev/null @@ -1,130 +0,0 @@ -Vault [![Build Status](https://travis-ci.org/hashicorp/vault.svg)](https://travis-ci.org/hashicorp/vault) -========= -**Please note**: We take Vault's security and our users' trust very seriously. If you believe you have found a security issue in Vault, _please responsibly disclose_ by contacting us at [security@hashicorp.com](mailto:security@hashicorp.com). 
- -========= - -- Website: https://www.vaultproject.io -- IRC: `#vault-tool` on Freenode -- Announcement list: [Google Groups](https://groups.google.com/group/hashicorp-announce) -- Discussion list: [Google Groups](https://groups.google.com/group/vault-tool) - -![Vault](https://raw.githubusercontent.com/hashicorp/vault/master/website/source/assets/images/logo-big.png?token=AAAFE8XmW6YF5TNuk3cosDGBK-sUGPEjks5VSAa2wA%3D%3D) - -Vault is a tool for securely accessing secrets. A secret is anything that you want to tightly control access to, such as API keys, passwords, certificates, and more. Vault provides a unified interface to any secret, while providing tight access control and recording a detailed audit log. - -A modern system requires access to a multitude of secrets: database credentials, API keys for external services, credentials for service-oriented architecture communication, etc. Understanding who is accessing what secrets is already very difficult and platform-specific. Adding on key rolling, secure storage, and detailed audit logs is almost impossible without a custom solution. This is where Vault steps in. - -The key features of Vault are: - -* **Secure Secret Storage**: Arbitrary key/value secrets can be stored - in Vault. Vault encrypts these secrets prior to writing them to persistent - storage, so gaining access to the raw storage isn't enough to access - your secrets. Vault can write to disk, [Consul](https://www.consul.io), - and more. - -* **Dynamic Secrets**: Vault can generate secrets on-demand for some - systems, such as AWS or SQL databases. For example, when an application - needs to access an S3 bucket, it asks Vault for credentials, and Vault - will generate an AWS keypair with valid permissions on demand. After - creating these dynamic secrets, Vault will also automatically revoke them - after the lease is up. - -* **Data Encryption**: Vault can encrypt and decrypt data without storing - it. 
This allows security teams to define encryption parameters and - developers to store encrypted data in a location such as SQL without - having to design their own encryption methods. - -* **Leasing and Renewal**: All secrets in Vault have a _lease_ associated - with it. At the end of the lease, Vault will automatically revoke that - secret. Clients are able to renew leases via built-in renew APIs. - -* **Revocation**: Vault has built-in support for secret revocation. Vault - can revoke not only single secrets, but a tree of secrets, for example - all secrets read by a specific user, or all secrets of a particular type. - Revocation assists in key rolling as well as locking down systems in the - case of an intrusion. - -For more information, see the [introduction section](https://www.vaultproject.io/intro) -of the Vault website. - -Getting Started & Documentation -------------------------------- - -All documentation is available on the [Vault website](https://www.vaultproject.io). - -Developing Vault --------------------- - -If you wish to work on Vault itself or any of its built-in systems, -you'll first need [Go](https://www.golang.org) installed on your -machine (version 1.7+ is *required*). - -For local dev first make sure Go is properly installed, including setting up a -[GOPATH](https://golang.org/doc/code.html#GOPATH). Next, clone this repository -into `$GOPATH/src/github.com/hashicorp/vault`. You can then download any -required build tools by bootstrapping your environment: - -```sh -$ make bootstrap -... -``` - -To compile a development version of Vault, run `make` or `make dev`. This will -put the Vault binary in the `bin` and `$GOPATH/bin` folders: - -```sh -$ make dev -... -$ bin/vault -... -``` - -To run tests, type `make test`. Note: this requires Docker to be installed. If -this exits with exit status 0, then everything is working! - -```sh -$ make test -... 
-``` - -If you're developing a specific package, you can run tests for just that -package by specifying the `TEST` variable. For example below, only -`vault` package tests will be run. - -```sh -$ make test TEST=./vault -... -``` - -### Acceptance Tests - -Vault has comprehensive [acceptance tests](https://en.wikipedia.org/wiki/Acceptance_testing) -covering most of the features of the secret and auth backends. - -If you're working on a feature of a secret or auth backend and want to -verify it is functioning (and also hasn't broken anything else), we recommend -running the acceptance tests. - -**Warning:** The acceptance tests create/destroy/modify *real resources*, which -may incur real costs in some cases. In the presence of a bug, it is technically -possible that broken backends could leave dangling data behind. Therefore, -please run the acceptance tests at your own risk. At the very least, -we recommend running them in their own private account for whatever backend -you're testing. - -To run the acceptance tests, invoke `make testacc`: - -```sh -$ make testacc TEST=./builtin/logical/consul -... -``` - -The `TEST` variable is required, and you should specify the folder where the -backend is. The `TESTARGS` variable is recommended to filter down to a specific -resource to test, since testing all of them at once can sometimes take a very -long time. - -Acceptance tests typically require other environment variables to be set for -things such as access keys. The test itself should error early and tell -you what to set, so it is not documented here. diff --git a/vendor/github.com/hashicorp/vault/make.bat b/vendor/github.com/hashicorp/vault/make.bat deleted file mode 100644 index 34adbfded956..000000000000 --- a/vendor/github.com/hashicorp/vault/make.bat +++ /dev/null @@ -1,107 +0,0 @@ -@echo off -setlocal - -set _EXITCODE=0 - -REM If no target is provided, default to test. 
-if [%1]==[] goto test - -set _TARGETS=bin,dev,generate,test,testacc,testrace,vet - -REM Run target. -for %%a in (%_TARGETS%) do (if x%1==x%%a goto %%a) -goto usage - -REM bin generates the releaseable binaries for Vault -:bin - call :generate - call .\scripts\windows\build.bat "%CD%" - goto :eof - -REM dev creates binaries for testing Vault locally. These are put -REM into ./bin/ as well as %GOPATH%/bin -:dev - call :generate - call .\scripts\windows\build.bat "%CD%" VAULT_DEV - goto :eof - -REM generate runs `go generate` to build the dynamically generated -REM source files. -:generate - go list ./... | findstr /v vendor | go generate - goto :eof - -REM test runs the unit tests and vets the code. -:test - call :testsetup - go test %_TEST% %TESTARGS% -timeout=30s -parallel=4 - call :setMaxExitCode %ERRORLEVEL% - echo. - goto vet - -REM testacc runs acceptance tests. -:testacc - call :testsetup - if x%_TEST% == x./... goto testacc_fail - if x%_TEST% == x.\... goto testacc_fail - set VAULT_ACC=1 - go test %_TEST% -v %TESTARGS% -timeout 45m - exit /b %ERRORLEVEL% -:testacc_fail - echo ERROR: Set %%TEST%% to a specific package. - exit /b 1 - -REM testrace runs the race checker. -:testrace - call :testsetup - go test -race %_TEST% %TESTARGS% - exit /b %ERRORLEVEL% - -REM testsetup calls `go generate` and defines the variables VAULT_ACC -REM and _TEST. VAULT_ACC is always cleared. _TEST defaults to the value -REM of the TEST environment variable, provided that TEST is defined, -REM otherwise _TEST it is set to "./...". -:testsetup - call :generate - set VAULT_ACC= - set _TEST=./... - if defined TEST set _TEST=%TEST% - goto :eof - -REM vet runs the Go source code static analysis tool `vet` to find -REM any common errors. 
-:vet - set _VETARGS=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr - if defined VETARGS set _VETARGS=%VETARGS% - - go tool vet 2>nul - if %ERRORLEVEL% equ 3 go get golang.org/x/tools/cmd/vet - - set _vetExitCode=0 - set _VAULT_PKG_DIRS=%TEMP%\vault-pkg-dirs.txt - - go list -f {{.Dir}} ./... | findstr /v vendor >"%_VAULT_PKG_DIRS%" - REM Skip the first row, which is the main vault package (.*github.com/hashicorp/vault$) - for /f "delims= skip=1" %%d in ("%_VAULT_PKG_DIRS%") do ( - go tool vet %_VETARGS% "%%d" - if ERRORLEVEL 1 set _vetExitCode=1 - call :setMaxExitCode %_vetExitCode% - ) - del /f "%_VAULT_PKG_DIRS%" 2>NUL - if %_vetExitCode% equ 0 exit /b %_EXITCODE% - echo. - echo Vet found suspicious constructs. Please check the reported constructs - echo and fix them if necessary before submitting the code for reviewal. - exit /b %_EXITCODE% - -:setMaxExitCode - if %1 gtr %_EXITCODE% set _EXITCODE=%1 - goto :eof - -:usage - echo usage: make [target] - echo. - echo target is in {%_TARGETS%}. - echo target defaults to test if none is provided. 
- exit /b 2 - goto :eof diff --git a/vendor/vendor.json b/vendor/vendor.json index c09a8f098709..579b3b3c1e95 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -195,7 +195,6 @@ {"path":"github.com/hashicorp/raft-boltdb","checksumSHA1":"QAxukkv54/iIvLfsUP6IK4R0m/A=","revision":"d1e82c1ec3f15ee991f7cc7ffd5b67ff6f5bbaee","revisionTime":"2015-02-01T20:08:39Z"}, {"path":"github.com/hashicorp/serf/coordinate","checksumSHA1":"0PeWsO2aI+2PgVYlYlDPKfzCLEQ=","revision":"80ab48778deee28e4ea2dc4ef1ebb2c5f4063996","revisionTime":"2018-05-07T23:19:28Z"}, {"path":"github.com/hashicorp/serf/serf","checksumSHA1":"QrT+nzyXsD/MmhTjjhcPdnALZ1I=","revision":"80ab48778deee28e4ea2dc4ef1ebb2c5f4063996","revisionTime":"2018-05-07T23:19:28Z"}, - {"path":"github.com/hashicorp/vault","checksumSHA1":"eGzvBRMFD6ZB3A6uO750np7Om/E=","revision":"182ba68a9589d4cef95234134aaa498a686e3de3","revisionTime":"2016-08-21T23:40:57Z"}, {"path":"github.com/hashicorp/vault/api","checksumSHA1":"+B4wuJNerIUKNAVzld7CmMaNW5A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/compressutil","checksumSHA1":"bSdPFOHaTwEvM4PIvn0PZfn75jM=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/hclutil","checksumSHA1":"RlqPBLOexQ0jj6jomhiompWKaUg=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, From 054b041976878db548f682d5f2d1c0e2bfdb4426 Mon Sep 17 00:00:00 2001 From: Alex Dadgar Date: Tue, 11 Sep 2018 12:05:47 -0700 Subject: [PATCH 6/6] vault consts --- vendor/github.com/hashicorp/vault/LICENSE | 363 ++++++++++++++++++ .../hashicorp/vault/helper/consts/consts.go | 14 + .../hashicorp/vault/helper/consts/error.go | 16 + .../vault/helper/consts/replication.go | 87 +++++ vendor/vendor.json | 1 + 5 files changed, 481 insertions(+) create mode 100644 vendor/github.com/hashicorp/vault/LICENSE create 
mode 100644 vendor/github.com/hashicorp/vault/helper/consts/consts.go create mode 100644 vendor/github.com/hashicorp/vault/helper/consts/error.go create mode 100644 vendor/github.com/hashicorp/vault/helper/consts/replication.go diff --git a/vendor/github.com/hashicorp/vault/LICENSE b/vendor/github.com/hashicorp/vault/LICENSE new file mode 100644 index 000000000000..e87a115e462e --- /dev/null +++ b/vendor/github.com/hashicorp/vault/LICENSE @@ -0,0 +1,363 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. 
"Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. 
under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. 
Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. 
Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. 
Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. 
Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. 
If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + diff --git a/vendor/github.com/hashicorp/vault/helper/consts/consts.go b/vendor/github.com/hashicorp/vault/helper/consts/consts.go new file mode 100644 index 000000000000..972a69f47b96 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/consts/consts.go @@ -0,0 +1,14 @@ +package consts + +const ( + // ExpirationRestoreWorkerCount specifies the number of workers to use while + // restoring leases into the expiration manager + ExpirationRestoreWorkerCount = 64 + + // NamespaceHeaderName is the header set to specify which namespace the + // request is indented for. + NamespaceHeaderName = "X-Vault-Namespace" + + // AuthHeaderName is the name of the header containing the token. + AuthHeaderName = "X-Vault-Token" +) diff --git a/vendor/github.com/hashicorp/vault/helper/consts/error.go b/vendor/github.com/hashicorp/vault/helper/consts/error.go new file mode 100644 index 000000000000..06977d5d5a4c --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/consts/error.go @@ -0,0 +1,16 @@ +package consts + +import "errors" + +var ( + // ErrSealed is returned if an operation is performed on a sealed barrier. + // No operation is expected to succeed before unsealing + ErrSealed = errors.New("Vault is sealed") + + // ErrStandby is returned if an operation is performed on a standby Vault. + // No operation is expected to succeed until active. 
+ ErrStandby = errors.New("Vault is in standby mode") + + // Used when .. is used in a path + ErrPathContainsParentReferences = errors.New("path cannot contain parent references") +) diff --git a/vendor/github.com/hashicorp/vault/helper/consts/replication.go b/vendor/github.com/hashicorp/vault/helper/consts/replication.go new file mode 100644 index 000000000000..bdad15522576 --- /dev/null +++ b/vendor/github.com/hashicorp/vault/helper/consts/replication.go @@ -0,0 +1,87 @@ +package consts + +import "time" + +type ReplicationState uint32 + +var ReplicationStaleReadTimeout = 2 * time.Second + +const ( + _ ReplicationState = iota + OldReplicationPrimary + OldReplicationSecondary + OldReplicationBootstrapping + // Don't add anything here. Adding anything to this Old block would cause + // the rest of the values to change below. This was done originally to + // ensure no overlap between old and new values. + + ReplicationUnknown ReplicationState = 0 + ReplicationPerformancePrimary ReplicationState = 1 << iota + ReplicationPerformanceSecondary + OldSplitReplicationBootstrapping + ReplicationDRPrimary + ReplicationDRSecondary + ReplicationPerformanceBootstrapping + ReplicationDRBootstrapping + ReplicationPerformanceDisabled + ReplicationDRDisabled + ReplicationPerformanceStandby +) + +func (r ReplicationState) string() string { + switch r { + case ReplicationPerformanceSecondary: + return "secondary" + case ReplicationPerformancePrimary: + return "primary" + case ReplicationPerformanceBootstrapping: + return "bootstrapping" + case ReplicationPerformanceDisabled: + return "disabled" + case ReplicationDRPrimary: + return "primary" + case ReplicationDRSecondary: + return "secondary" + case ReplicationDRBootstrapping: + return "bootstrapping" + case ReplicationDRDisabled: + return "disabled" + } + + return "unknown" +} + +func (r ReplicationState) GetDRString() string { + switch { + case r.HasState(ReplicationDRBootstrapping): + return ReplicationDRBootstrapping.string() + 
case r.HasState(ReplicationDRPrimary): + return ReplicationDRPrimary.string() + case r.HasState(ReplicationDRSecondary): + return ReplicationDRSecondary.string() + case r.HasState(ReplicationDRDisabled): + return ReplicationDRDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) GetPerformanceString() string { + switch { + case r.HasState(ReplicationPerformanceBootstrapping): + return ReplicationPerformanceBootstrapping.string() + case r.HasState(ReplicationPerformancePrimary): + return ReplicationPerformancePrimary.string() + case r.HasState(ReplicationPerformanceSecondary): + return ReplicationPerformanceSecondary.string() + case r.HasState(ReplicationPerformanceDisabled): + return ReplicationPerformanceDisabled.string() + default: + return "unknown" + } +} + +func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 } +func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag } +func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag } +func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag } diff --git a/vendor/vendor.json b/vendor/vendor.json index 579b3b3c1e95..98a87c8339ee 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -197,6 +197,7 @@ {"path":"github.com/hashicorp/serf/serf","checksumSHA1":"QrT+nzyXsD/MmhTjjhcPdnALZ1I=","revision":"80ab48778deee28e4ea2dc4ef1ebb2c5f4063996","revisionTime":"2018-05-07T23:19:28Z"}, {"path":"github.com/hashicorp/vault/api","checksumSHA1":"+B4wuJNerIUKNAVzld7CmMaNW5A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/compressutil","checksumSHA1":"bSdPFOHaTwEvM4PIvn0PZfn75jM=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, + 
{"path":"github.com/hashicorp/vault/helper/consts","checksumSHA1":"QNGGvSYtwk6VCkj4laZPjM2301E=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/hclutil","checksumSHA1":"RlqPBLOexQ0jj6jomhiompWKaUg=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/jsonutil","checksumSHA1":"POgkM3GrjRFw6H3sw95YNEs552A=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"}, {"path":"github.com/hashicorp/vault/helper/parseutil","checksumSHA1":"HA2MV/2XI0HcoThSRxQCaBZR2ps=","revision":"8575f8fedcf8f5a6eb2b4701cb527b99574b5286","revisionTime":"2018-09-06T17:45:45Z"},