diff --git a/.travis.yml b/.travis.yml index 738b75231227..045f3d8d12af 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,7 +14,7 @@ services: - docker go: - - "1.10.2" + - "1.10.3" go_import_path: github.com/hashicorp/vault diff --git a/CHANGELOG.md b/CHANGELOG.md index 74ede4a370e7..5ede8db6f138 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,13 @@ +## 0.10.3 (Unreleased) + +BUG FIXES: + + * secrets/kv: Fix writing to the root of a KVv2 mount from `vault kv` commands + incorrectly operating on a root+mount path instead of being an error + [GH-4726] + * seal/pkcs11: Add `CKK_SHA256_HMAC` to the search list when finding HMAC + keys, fixing lookup on some Thales devices + ## 0.10.2 (June 6th, 2018) SECURITY: @@ -89,6 +99,8 @@ IMPROVEMENTS: * ui: Identity interface now lists groups by name [GH-4655] * ui: Permission denied errors still render the sidebar in the Access section [GH-4658] + * replication: Improve performance of index page flushes and WAL garbage + collecting BUG FIXES: diff --git a/builtin/credential/approle/backend.go b/builtin/credential/approle/backend.go index 01911441297b..d57a3ec8a687 100644 --- a/builtin/credential/approle/backend.go +++ b/builtin/credential/approle/backend.go @@ -30,7 +30,7 @@ type backend struct { view logical.Storage // Guard to clean-up the expired SecretID entries - tidySecretIDCASGuard uint32 + tidySecretIDCASGuard *uint32 // Locks to make changes to role entries. These will be initialized to a // predefined number of locks when the backend is created, and will be @@ -85,6 +85,8 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { // Create locks to modify the generated SecretIDAccessors secretIDAccessorLocks: locksutil.CreateLocks(), + + tidySecretIDCASGuard: new(uint32), } // Attach the paths and secrets that are to be handled by the backend diff --git a/builtin/credential/approle/path_tidy_user_id.go b/builtin/credential/approle/path_tidy_user_id.go index 1a385efd2aa5..590cb7284d41 100644 --- a/builtin/credential/approle/path_tidy_user_id.go +++ b/builtin/credential/approle/path_tidy_user_id.go @@ -27,9 +27,9 @@ func pathTidySecretID(b *backend) *framework.Path { // tidySecretID is used to delete entries in the whitelist that are expired. func (b *backend) tidySecretID(ctx context.Context, s logical.Storage) error { - grabbed := atomic.CompareAndSwapUint32(&b.tidySecretIDCASGuard, 0, 1) + grabbed := atomic.CompareAndSwapUint32(b.tidySecretIDCASGuard, 0, 1) if grabbed { - defer atomic.StoreUint32(&b.tidySecretIDCASGuard, 0) + defer atomic.StoreUint32(b.tidySecretIDCASGuard, 0) } else { return fmt.Errorf("SecretID tidy operation already running") } diff --git a/builtin/credential/aws/backend.go b/builtin/credential/aws/backend.go index 146625711837..992b2959878d 100644 --- a/builtin/credential/aws/backend.go +++ b/builtin/credential/aws/backend.go @@ -39,8 +39,8 @@ type backend struct { blacklistMutex sync.RWMutex // Guards the blacklist/whitelist tidy functions - tidyBlacklistCASGuard uint32 - tidyWhitelistCASGuard uint32 + tidyBlacklistCASGuard *uint32 + tidyWhitelistCASGuard *uint32 // Duration after which the periodic function of the backend needs to // tidy the blacklist and whitelist entries. @@ -82,10 +82,12 @@ func Backend(conf *logical.BackendConfig) (*backend, error) { b := &backend{ // Setting the periodic func to be run once in an hour. // If there is a real need, this can be made configurable. 
- tidyCooldownPeriod: time.Hour, - EC2ClientsMap: make(map[string]map[string]*ec2.EC2), - IAMClientsMap: make(map[string]map[string]*iam.IAM), - iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour), + tidyCooldownPeriod: time.Hour, + EC2ClientsMap: make(map[string]map[string]*ec2.EC2), + IAMClientsMap: make(map[string]map[string]*iam.IAM), + iamUserIdToArnCache: cache.New(7*24*time.Hour, 24*time.Hour), + tidyBlacklistCASGuard: new(uint32), + tidyWhitelistCASGuard: new(uint32), } b.resolveArnToUniqueIDFunc = b.resolveArnToRealUniqueId diff --git a/builtin/credential/aws/path_tidy_identity_whitelist.go b/builtin/credential/aws/path_tidy_identity_whitelist.go index fa0e8d82dae7..f1abe2308614 100644 --- a/builtin/credential/aws/path_tidy_identity_whitelist.go +++ b/builtin/credential/aws/path_tidy_identity_whitelist.go @@ -34,9 +34,9 @@ expiration, before it is removed from the backend storage.`, // tidyWhitelistIdentity is used to delete entries in the whitelist that are expired. func (b *backend) tidyWhitelistIdentity(ctx context.Context, s logical.Storage, safety_buffer int) error { - grabbed := atomic.CompareAndSwapUint32(&b.tidyWhitelistCASGuard, 0, 1) + grabbed := atomic.CompareAndSwapUint32(b.tidyWhitelistCASGuard, 0, 1) if grabbed { - defer atomic.StoreUint32(&b.tidyWhitelistCASGuard, 0) + defer atomic.StoreUint32(b.tidyWhitelistCASGuard, 0) } else { return fmt.Errorf("identity whitelist tidy operation already running") } diff --git a/builtin/credential/aws/path_tidy_roletag_blacklist.go b/builtin/credential/aws/path_tidy_roletag_blacklist.go index dfb420653e83..a29837110d2b 100644 --- a/builtin/credential/aws/path_tidy_roletag_blacklist.go +++ b/builtin/credential/aws/path_tidy_roletag_blacklist.go @@ -34,9 +34,9 @@ expiration, before it is removed from the backend storage.`, // tidyBlacklistRoleTag is used to clean-up the entries in the role tag blacklist. 
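// A minimal standalone sketch of the tidy-guard pattern the approle and AWS
// changes above apply: the guard field becomes a *uint32 allocated with
// new(uint32) in the constructor, and every sync/atomic call goes through the
// pointer. The backend/tidy names below are illustrative, not from the diff.
package main

import (
	"fmt"
	"sync/atomic"
)

type backend struct {
	// Guard to ensure only one tidy operation runs at a time.
	tidyCASGuard *uint32
}

func newBackend() *backend {
	return &backend{tidyCASGuard: new(uint32)}
}

func (b *backend) tidy() error {
	// Try to take the guard; if another tidy already holds it, bail out.
	if !atomic.CompareAndSwapUint32(b.tidyCASGuard, 0, 1) {
		return fmt.Errorf("tidy operation already running")
	}
	// Release the guard when this run finishes.
	defer atomic.StoreUint32(b.tidyCASGuard, 0)

	// ... expensive cleanup work would go here ...
	return nil
}

func main() {
	b := newBackend()
	fmt.Println(b.tidy()) // <nil>
}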
func (b *backend) tidyBlacklistRoleTag(ctx context.Context, s logical.Storage, safety_buffer int) error { - grabbed := atomic.CompareAndSwapUint32(&b.tidyBlacklistCASGuard, 0, 1) + grabbed := atomic.CompareAndSwapUint32(b.tidyBlacklistCASGuard, 0, 1) if grabbed { - defer atomic.StoreUint32(&b.tidyBlacklistCASGuard, 0) + defer atomic.StoreUint32(b.tidyBlacklistCASGuard, 0) } else { return fmt.Errorf("roletag blacklist tidy operation already running") } diff --git a/command/kv_delete.go b/command/kv_delete.go index 9ce39976a9dd..945b4c675846 100644 --- a/command/kv_delete.go +++ b/command/kv_delete.go @@ -101,14 +101,18 @@ func (c *KVDeleteCommand) Run(args []string) int { return 2 } + var secret *api.Secret if v2 { - err = c.deleteV2(path, mountPath, client) + secret, err = c.deleteV2(path, mountPath, client) } else { - _, err = client.Logical().Delete(path) + secret, err = client.Logical().Delete(path) } if err != nil { c.UI.Error(fmt.Sprintf("Error deleting %s: %s", path, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } return 2 } @@ -116,29 +120,30 @@ func (c *KVDeleteCommand) Run(args []string) int { return 0 } -func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) error { +func (c *KVDeleteCommand) deleteV2(path, mountPath string, client *api.Client) (*api.Secret, error) { var err error + var secret *api.Secret switch { case len(c.flagVersions) > 0: path = addPrefixToVKVPath(path, mountPath, "delete") if err != nil { - return err + return nil, err } data := map[string]interface{}{ "versions": kvParseVersionsFlags(c.flagVersions), } - _, err = client.Logical().Write(path, data) + secret, err = client.Logical().Write(path, data) default: path = addPrefixToVKVPath(path, mountPath, "data") if err != nil { - return err + return nil, err } - _, err = client.Logical().Delete(path) + secret, err = client.Logical().Delete(path) } - return err + return secret, err } diff --git a/command/kv_destroy.go b/command/kv_destroy.go index f622e3f05f00..64ed284d1864 100644 --- a/command/kv_destroy.go +++ b/command/kv_destroy.go @@ -115,6 +115,9 @@ func (c *KVDestroyCommand) Run(args []string) int { secret, err := client.Logical().Write(path, data) if err != nil { c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } return 2 } if secret == nil { diff --git a/command/kv_helpers.go b/command/kv_helpers.go index 1c3c74a8c907..2ed1d9739a6e 100644 --- a/command/kv_helpers.go +++ b/command/kv_helpers.go @@ -99,8 +99,13 @@ func isKVv2(path string, client *api.Client) (string, bool, error) { } func addPrefixToVKVPath(p, mountPath, apiPrefix string) string { - p = strings.TrimPrefix(p, mountPath) - return path.Join(mountPath, apiPrefix, p) + switch { + case p == mountPath, p == strings.TrimSuffix(mountPath, "/"): + return path.Join(mountPath, apiPrefix) + default: + p = strings.TrimPrefix(p, mountPath) + return path.Join(mountPath, apiPrefix, p) + } } func getHeaderForMap(header string, data map[string]interface{}) string { diff --git a/command/kv_metadata_delete.go b/command/kv_metadata_delete.go index 446533bdf307..8ff3a5655e47 100644 --- a/command/kv_metadata_delete.go +++ b/command/kv_metadata_delete.go @@ -82,8 +82,11 @@ func (c *KVMetadataDeleteCommand) Run(args []string) int { } path = addPrefixToVKVPath(path, mountPath, "metadata") - if _, err := client.Logical().Delete(path); err != nil { + if secret, err := client.Logical().Delete(path); err != nil { c.UI.Error(fmt.Sprintf("Error deleting %s: %s", path, 
err)) + if secret != nil { + OutputSecret(c.UI, secret) + } return 2 } diff --git a/command/kv_metadata_get.go b/command/kv_metadata_get.go index 75c4cd22f7f8..a9bfc77eaad5 100644 --- a/command/kv_metadata_get.go +++ b/command/kv_metadata_get.go @@ -105,7 +105,13 @@ func (c *KVMetadataGetCommand) Run(args []string) int { return OutputSecret(c.UI, secret) } - versions := secret.Data["versions"].(map[string]interface{}) + versionsRaw, ok := secret.Data["versions"] + if !ok || versionsRaw == nil { + c.UI.Error(fmt.Sprintf("No value found at %s", path)) + OutputSecret(c.UI, secret) + return 2 + } + versions := versionsRaw.(map[string]interface{}) delete(secret.Data, "versions") diff --git a/command/kv_metadata_put.go b/command/kv_metadata_put.go index 32f8d9248152..b88287de7441 100644 --- a/command/kv_metadata_put.go +++ b/command/kv_metadata_put.go @@ -125,6 +125,9 @@ func (c *KVMetadataPutCommand) Run(args []string) int { secret, err := client.Logical().Write(path, data) if err != nil { c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } return 2 } if secret == nil { diff --git a/command/kv_undelete.go b/command/kv_undelete.go index 58eee93c0b9a..7c11e8ce5d48 100644 --- a/command/kv_undelete.go +++ b/command/kv_undelete.go @@ -110,6 +110,9 @@ func (c *KVUndeleteCommand) Run(args []string) int { secret, err := client.Logical().Write(path, data) if err != nil { c.UI.Error(fmt.Sprintf("Error writing data to %s: %s", path, err)) + if secret != nil { + OutputSecret(c.UI, secret) + } return 2 } if secret == nil { diff --git a/http/cors.go b/http/cors.go index a01228be2da6..9e8b6fa195e0 100644 --- a/http/cors.go +++ b/http/cors.go @@ -22,13 +22,18 @@ func wrapCORSHandler(h http.Handler, core *vault.Core) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { corsConf := core.CORSConfig() - origin := req.Header.Get("Origin") - requestMethod := req.Header.Get("Access-Control-Request-Method") - // If CORS is not enabled or if no Origin header is present (i.e. the request // is from the Vault CLI. A browser will always send an Origin header), then // just return a 204. 
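// A self-contained sketch of the fixed addPrefixToVKVPath helper shown above,
// illustrating why writing to the root of a KVv2 mount now maps to
// "<mount>/data" rather than the "<mount>/data/<mount>" path described in the
// changelog entry. The example mount name is illustrative.
package main

import (
	"fmt"
	"path"
	"strings"
)

func addPrefixToVKVPath(p, mountPath, apiPrefix string) string {
	switch {
	case p == mountPath, p == strings.TrimSuffix(mountPath, "/"):
		// Operating on the root of the mount: no per-key suffix to append.
		return path.Join(mountPath, apiPrefix)
	default:
		p = strings.TrimPrefix(p, mountPath)
		return path.Join(mountPath, apiPrefix, p)
	}
}

func main() {
	fmt.Println(addPrefixToVKVPath("secret", "secret/", "data"))     // secret/data
	fmt.Println(addPrefixToVKVPath("secret/foo", "secret/", "data")) // secret/data/foo
}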
- if !corsConf.IsEnabled() || origin == "" { + if !corsConf.IsEnabled() { + h.ServeHTTP(w, req) + return + } + + origin := req.Header.Get("Origin") + requestMethod := req.Header.Get("Access-Control-Request-Method") + + if origin == "" { h.ServeHTTP(w, req) return } diff --git a/http/forwarding_test.go b/http/forwarding_test.go index a55b4f66bdca..13db9f79161b 100644 --- a/http/forwarding_test.go +++ b/http/forwarding_test.go @@ -128,7 +128,7 @@ func TestHTTP_Forwarding_Stress(t *testing.T) { testHTTP_Forwarding_Stress_Common(t, true, 50) } -func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) { +func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint32) { testPlaintext := "the quick brown fox" testPlaintextB64 := "dGhlIHF1aWNrIGJyb3duIGZveA==" @@ -186,29 +186,33 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) } //core.Logger().Printf("[TRACE] done mounting transit") - var totalOps uint64 - var successfulOps uint64 - var key1ver int64 = 1 - var key2ver int64 = 1 - var key3ver int64 = 1 - var numWorkers uint64 = 50 - var numWorkersStarted uint64 + var totalOps *uint32 = new(uint32) + var successfulOps *uint32 = new(uint32) + var key1ver *int32 = new(int32) + *key1ver = 1 + var key2ver *int32 = new(int32) + *key2ver = 1 + var key3ver *int32 = new(int32) + *key3ver = 1 + var numWorkers *uint32 = new(uint32) + *numWorkers = 50 + var numWorkersStarted *uint32 = new(uint32) var waitLock sync.Mutex waitCond := sync.NewCond(&waitLock) // This is the goroutine loop doFuzzy := func(id int, parallel bool) { - var myTotalOps uint64 - var mySuccessfulOps uint64 - var keyVer int64 = 1 + var myTotalOps uint32 + var mySuccessfulOps uint32 + var keyVer int32 = 1 // Check for panics, otherwise notify we're done defer func() { if err := recover(); err != nil { core.Logger().Error("got a panic: %v", err) t.Fail() } - atomic.AddUint64(&totalOps, myTotalOps) - atomic.AddUint64(&successfulOps, mySuccessfulOps) + atomic.AddUint32(totalOps, myTotalOps) + atomic.AddUint32(successfulOps, mySuccessfulOps) wg.Done() }() @@ -281,10 +285,10 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) } } - atomic.AddUint64(&numWorkersStarted, 1) + atomic.AddUint32(numWorkersStarted, 1) waitCond.L.Lock() - for atomic.LoadUint64(&numWorkersStarted) != numWorkers { + for atomic.LoadUint32(numWorkersStarted) != atomic.LoadUint32(numWorkers) { waitCond.Wait() } waitCond.L.Unlock() @@ -375,11 +379,11 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) if parallel { switch chosenKey { case "test1": - atomic.AddInt64(&key1ver, 1) + atomic.AddInt32(key1ver, 1) case "test2": - atomic.AddInt64(&key2ver, 1) + atomic.AddInt32(key2ver, 1) case "test3": - atomic.AddInt64(&key3ver, 1) + atomic.AddInt32(key3ver, 1) } } else { keyVer++ @@ -389,19 +393,19 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) // Change the min version, which also tests the archive functionality case "change_min_version": - var latestVersion int64 = keyVer + var latestVersion int32 = keyVer if parallel { switch chosenKey { case "test1": - latestVersion = atomic.LoadInt64(&key1ver) + latestVersion = atomic.LoadInt32(key1ver) case "test2": - latestVersion = atomic.LoadInt64(&key2ver) + latestVersion = atomic.LoadInt32(key2ver) case "test3": - latestVersion = atomic.LoadInt64(&key3ver) + latestVersion = atomic.LoadInt32(key3ver) } } - setVersion := (myRand.Int63() % latestVersion) + 1 + setVersion 
:= (myRand.Int31() % latestVersion) + 1 //core.Logger().Printf("[TRACE] %s, %s, %d, new min version %d", chosenFunc, chosenKey, id, setVersion) @@ -415,10 +419,10 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) } } - atomic.StoreUint64(&numWorkers, num) + atomic.StoreUint32(numWorkers, num) // Spawn some of these workers for 10 seconds - for i := 0; i < int(atomic.LoadUint64(&numWorkers)); i++ { + for i := 0; i < int(atomic.LoadUint32(numWorkers)); i++ { wg.Add(1) //core.Logger().Printf("[TRACE] spawning %d", i) go doFuzzy(i+1, parallel) @@ -427,10 +431,10 @@ func testHTTP_Forwarding_Stress_Common(t *testing.T, parallel bool, num uint64) // Wait for them all to finish wg.Wait() - if totalOps == 0 || totalOps != successfulOps { - t.Fatalf("total/successful ops zero or mismatch: %d/%d; parallel: %t, num %d", totalOps, successfulOps, parallel, num) + if *totalOps == 0 || *totalOps != *successfulOps { + t.Fatalf("total/successful ops zero or mismatch: %d/%d; parallel: %t, num %d", *totalOps, *successfulOps, parallel, num) } - t.Logf("total operations tried: %d, total successful: %d; parallel: %t, num %d", totalOps, successfulOps, parallel, num) + t.Logf("total operations tried: %d, total successful: %d; parallel: %t, num %d", *totalOps, *successfulOps, parallel, num) } // This tests TLS connection state forwarding by ensuring that we can use a diff --git a/http/handler.go b/http/handler.go index f7d6437bb2a1..a9be673cb675 100644 --- a/http/handler.go +++ b/http/handler.go @@ -474,6 +474,7 @@ func requestAuth(core *vault.Core, r *http.Request, req *logical.Request) *logic if err == nil && te != nil { req.ClientTokenAccessor = te.Accessor req.ClientTokenRemainingUses = te.NumUses + req.SetTokenEntry(te) } } diff --git a/logical/framework/backend_test.go b/logical/framework/backend_test.go index eb80889c1a42..fa050ac60c46 100644 --- a/logical/framework/backend_test.go +++ b/logical/framework/backend_test.go @@ -203,9 +203,9 @@ func TestBackendHandleRequest_renewAuth(t *testing.T) { } func TestBackendHandleRequest_renewAuthCallback(t *testing.T) { - var called uint32 + called := new(uint32) callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { - atomic.AddUint32(&called, 1) + atomic.AddUint32(called, 1) return nil, nil } @@ -217,14 +217,14 @@ func TestBackendHandleRequest_renewAuthCallback(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if v := atomic.LoadUint32(&called); v != 1 { + if v := atomic.LoadUint32(called); v != 1 { t.Fatalf("bad: %#v", v) } } func TestBackendHandleRequest_renew(t *testing.T) { - var called uint32 + called := new(uint32) callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { - atomic.AddUint32(&called, 1) + atomic.AddUint32(called, 1) return nil, nil } @@ -240,15 +240,15 @@ func TestBackendHandleRequest_renew(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if v := atomic.LoadUint32(&called); v != 1 { + if v := atomic.LoadUint32(called); v != 1 { t.Fatalf("bad: %#v", v) } } func TestBackendHandleRequest_revoke(t *testing.T) { - var called uint32 + called := new(uint32) callback := func(context.Context, *logical.Request, *FieldData) (*logical.Response, error) { - atomic.AddUint32(&called, 1) + atomic.AddUint32(called, 1) return nil, nil } @@ -264,16 +264,16 @@ func TestBackendHandleRequest_revoke(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if v := atomic.LoadUint32(&called); v != 1 { + if v := atomic.LoadUint32(called); 
v != 1 { t.Fatalf("bad: %#v", v) } } func TestBackendHandleRequest_rollback(t *testing.T) { - var called uint32 + called := new(uint32) callback := func(_ context.Context, req *logical.Request, kind string, data interface{}) error { if data == "foo" { - atomic.AddUint32(&called, 1) + atomic.AddUint32(called, 1) } return nil } @@ -298,16 +298,16 @@ func TestBackendHandleRequest_rollback(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if v := atomic.LoadUint32(&called); v != 1 { + if v := atomic.LoadUint32(called); v != 1 { t.Fatalf("bad: %#v", v) } } func TestBackendHandleRequest_rollbackMinAge(t *testing.T) { - var called uint32 + called := new(uint32) callback := func(_ context.Context, req *logical.Request, kind string, data interface{}) error { if data == "foo" { - atomic.AddUint32(&called, 1) + atomic.AddUint32(called, 1) } return nil } @@ -330,7 +330,7 @@ func TestBackendHandleRequest_rollbackMinAge(t *testing.T) { if err != nil { t.Fatalf("err: %s", err) } - if v := atomic.LoadUint32(&called); v != 0 { + if v := atomic.LoadUint32(called); v != 0 { t.Fatalf("bad: %#v", v) } } diff --git a/logical/request.go b/logical/request.go index 7bff6f0804db..4c395370d6e2 100644 --- a/logical/request.go +++ b/logical/request.go @@ -141,6 +141,14 @@ type Request struct { // accessible. Unauthenticated bool `json:"unauthenticated" structs:"unauthenticated" mapstructure:"unauthenticated"` + // Cached token entry. This avoids another lookup in request handling when + // we've already looked it up at http handling time. Note that this token + // has not been "used", as in it will not properly take into account use + // count limitations. As a result this field should only ever be used for + // transport to a function that would otherwise do a lookup and then + // properly use the token. + tokenEntry *TokenEntry + // For replication, contains the last WAL on the remote side after handling // the request, used for best-effort avoidance of stale read-after-write lastRemoteWAL uint64 @@ -199,6 +207,14 @@ func (r *Request) SetLastRemoteWAL(last uint64) { r.lastRemoteWAL = last } +func (r *Request) TokenEntry() *TokenEntry { + return r.tokenEntry +} + +func (r *Request) SetTokenEntry(te *TokenEntry) { + r.tokenEntry = te +} + // RenewRequest creates the structure of the renew request. func RenewRequest(path string, secret *Secret, data map[string]interface{}) *Request { return &Request{ diff --git a/logical/token.go b/logical/token.go new file mode 100644 index 000000000000..337791e486eb --- /dev/null +++ b/logical/token.go @@ -0,0 +1,119 @@ +package logical + +import ( + "time" + + sockaddr "github.com/hashicorp/go-sockaddr" +) + +// TokenEntry is used to represent a given token +type TokenEntry struct { + // ID of this entry, generally a random UUID + ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""` + + // Accessor for this token, a random UUID + Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""` + + // Parent token, used for revocation trees + Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""` + + // Which named policies should be used + Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` + + // Used for audit trails, this is something like "auth/user/login" + Path string `json:"path" mapstructure:"path" structs:"path"` + + // Used for auditing. 
This could include things like "source", "user", "ip" + Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"` + + // Used for operators to be able to associate with the source + DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` + + // Used to restrict the number of uses (zero is unlimited). This is to + // support one-time-tokens (generalized). There are a few special values: + // if it's -1 it has run through its use counts and is executing its final + // use; if it's -2 it is tainted, which means revocation is currently + // running on it; and if it's -3 it's also tainted but revocation + // previously ran and failed, so this hints the tidy function to try it + // again. + NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` + + // Time of token creation + CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""` + + // Duration set when token was created + TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""` + + // Explicit maximum TTL on the token + ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""` + + // If set, the role that was used for parameters at creation time + Role string `json:"role" mapstructure:"role" structs:"role"` + + // If set, the period of the token. This is only used when created directly + // through the create endpoint; periods managed by roles or other auth + // backends are subject to those renewal rules. + Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""` + + // These are the deprecated fields + DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""` + NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""` + CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""` + ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""` + + EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"` + + // The set of CIDRs that this token can be used with + BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"` +} + +func (te *TokenEntry) SentinelGet(key string) (interface{}, error) { + if te == nil { + return nil, nil + } + switch key { + case "period": + return te.Period, nil + + case "period_seconds": + return int64(te.Period.Seconds()), nil + + case "explicit_max_ttl": + return te.ExplicitMaxTTL, nil + + case "explicit_max_ttl_seconds": + return int64(te.ExplicitMaxTTL.Seconds()), nil + + case "creation_ttl": + return te.TTL, nil + + case "creation_ttl_seconds": + return int64(te.TTL.Seconds()), nil + + case "creation_time": + return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil + + case "creation_time_unix": + return time.Unix(te.CreationTime, 0), nil + + case "meta", "metadata": + return te.Meta, nil + } + + return nil, nil +} + +func (te *TokenEntry) SentinelKeys() []string { + return []string{ + "period", + "period_seconds", + "explicit_max_ttl", + "explicit_max_ttl_seconds", + "creation_ttl", + "creation_ttl_seconds", + "creation_time", + "creation_time_unix", + "meta", + "metadata", + } +} diff --git a/physical/consul/consul.go b/physical/consul/consul.go index 9f0beb364183..982fefca7ce2 100644 --- a/physical/consul/consul.go +++ 
b/physical/consul/consul.go @@ -636,9 +636,9 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph // and end of a handler's life (or after a handler wakes up from // sleeping during a back-off/retry). var shutdown bool - var checkLock int64 var registeredServiceID string - var serviceRegLock int64 + checkLock := new(int32) + serviceRegLock := new(int32) for !shutdown { select { @@ -654,10 +654,10 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph // Abort if service discovery is disabled or a // reconcile handler is already active - if !c.disableRegistration && atomic.CompareAndSwapInt64(&serviceRegLock, 0, 1) { + if !c.disableRegistration && atomic.CompareAndSwapInt32(serviceRegLock, 0, 1) { // Enter handler with serviceRegLock held go func() { - defer atomic.CompareAndSwapInt64(&serviceRegLock, 1, 0) + defer atomic.CompareAndSwapInt32(serviceRegLock, 1, 0) for !shutdown { serviceID, err := c.reconcileConsul(registeredServiceID, activeFunc, sealedFunc) if err != nil { @@ -680,10 +680,10 @@ func (c *ConsulBackend) runEventDemuxer(waitGroup *sync.WaitGroup, shutdownCh ph checkTimer.Reset(c.checkDuration()) // Abort if service discovery is disabled or a // reconcile handler is active - if !c.disableRegistration && atomic.CompareAndSwapInt64(&checkLock, 0, 1) { + if !c.disableRegistration && atomic.CompareAndSwapInt32(checkLock, 0, 1) { // Enter handler with checkLock held go func() { - defer atomic.CompareAndSwapInt64(&checkLock, 1, 0) + defer atomic.CompareAndSwapInt32(checkLock, 1, 0) for !shutdown { sealed := sealedFunc() if err := c.runCheck(sealed); err != nil { diff --git a/physical/inmem/inmem.go b/physical/inmem/inmem.go index 139671ce6a4e..0274305685a2 100644 --- a/physical/inmem/inmem.go +++ b/physical/inmem/inmem.go @@ -36,10 +36,10 @@ type InmemBackend struct { root *radix.Tree permitPool *physical.PermitPool logger log.Logger - failGet uint32 - failPut uint32 - failDelete uint32 - failList uint32 + failGet *uint32 + failPut *uint32 + failDelete *uint32 + failList *uint32 } type TransactionalInmemBackend struct { @@ -52,6 +52,10 @@ func NewInmem(_ map[string]string, logger log.Logger) (physical.Backend, error) root: radix.New(), permitPool: physical.NewPermitPool(physical.DefaultParallelOperations), logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), } return in, nil } @@ -64,6 +68,10 @@ func NewTransactionalInmem(_ map[string]string, logger log.Logger) (physical.Bac root: radix.New(), permitPool: physical.NewPermitPool(1), logger: logger, + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), }, } return in, nil @@ -81,7 +89,7 @@ func (i *InmemBackend) Put(ctx context.Context, entry *physical.Entry) error { } func (i *InmemBackend) PutInternal(ctx context.Context, entry *physical.Entry) error { - if atomic.LoadUint32(&i.failPut) != 0 { + if atomic.LoadUint32(i.failPut) != 0 { return PutDisabledError } @@ -94,7 +102,7 @@ func (i *InmemBackend) FailPut(fail bool) { if fail { val = 1 } - atomic.StoreUint32(&i.failPut, val) + atomic.StoreUint32(i.failPut, val) } // Get is used to fetch an entry @@ -109,7 +117,7 @@ func (i *InmemBackend) Get(ctx context.Context, key string) (*physical.Entry, er } func (i *InmemBackend) GetInternal(ctx context.Context, key string) (*physical.Entry, error) { - if atomic.LoadUint32(&i.failGet) != 0 { + if atomic.LoadUint32(i.failGet) != 0 { return nil, GetDisabledError } @@ -127,7 
+135,7 @@ func (i *InmemBackend) FailGet(fail bool) { if fail { val = 1 } - atomic.StoreUint32(&i.failGet, val) + atomic.StoreUint32(i.failGet, val) } // Delete is used to permanently delete an entry @@ -142,7 +150,7 @@ func (i *InmemBackend) Delete(ctx context.Context, key string) error { } func (i *InmemBackend) DeleteInternal(ctx context.Context, key string) error { - if atomic.LoadUint32(&i.failDelete) != 0 { + if atomic.LoadUint32(i.failDelete) != 0 { return DeleteDisabledError } @@ -155,7 +163,7 @@ func (i *InmemBackend) FailDelete(fail bool) { if fail { val = 1 } - atomic.StoreUint32(&i.failDelete, val) + atomic.StoreUint32(i.failDelete, val) } // List is used ot list all the keys under a given @@ -171,7 +179,7 @@ func (i *InmemBackend) List(ctx context.Context, prefix string) ([]string, error } func (i *InmemBackend) ListInternal(prefix string) ([]string, error) { - if atomic.LoadUint32(&i.failList) != 0 { + if atomic.LoadUint32(i.failList) != 0 { return nil, ListDisabledError } @@ -201,7 +209,7 @@ func (i *InmemBackend) FailList(fail bool) { if fail { val = 1 } - atomic.StoreUint32(&i.failList, val) + atomic.StoreUint32(i.failList, val) } // Implements the transaction interface diff --git a/physical/inmem/transactions_test.go b/physical/inmem/transactions_test.go index fe123100c785..a505c606af6c 100644 --- a/physical/inmem/transactions_test.go +++ b/physical/inmem/transactions_test.go @@ -71,6 +71,10 @@ func newFaultyPseudo(logger log.Logger, faultyPaths []string) *faultyPseudo { root: radix.New(), permitPool: physical.NewPermitPool(1), logger: logger.Named("storage.inmembackend"), + failGet: new(uint32), + failPut: new(uint32), + failDelete: new(uint32), + failList: new(uint32), }, faultyPaths: make(map[string]struct{}, len(faultyPaths)), } diff --git a/scripts/cross/Dockerfile b/scripts/cross/Dockerfile index 2c191e084be7..23e927271883 100644 --- a/scripts/cross/Dockerfile +++ b/scripts/cross/Dockerfile @@ -19,7 +19,7 @@ RUN rm -rf /var/lib/apt/lists/* RUN npm install -g yarn@1.5.0 -ENV GOVERSION 1.10.1 +ENV GOVERSION 1.10.3 RUN mkdir /goroot && mkdir /gopath RUN curl https://storage.googleapis.com/golang/go${GOVERSION}.linux-amd64.tar.gz \ | tar xvzf - -C /goroot --strip-components=1 diff --git a/vault/acl.go b/vault/acl.go index e86e79e35f5a..bc792052246e 100644 --- a/vault/acl.go +++ b/vault/acl.go @@ -406,7 +406,7 @@ CHECK: ret.Allowed = true return } -func (c *Core) performPolicyChecks(ctx context.Context, acl *ACL, te *TokenEntry, req *logical.Request, inEntity *identity.Entity, opts *PolicyCheckOpts) (ret *AuthResults) { +func (c *Core) performPolicyChecks(ctx context.Context, acl *ACL, te *logical.TokenEntry, req *logical.Request, inEntity *identity.Entity, opts *PolicyCheckOpts) (ret *AuthResults) { ret = new(AuthResults) // First, perform normal ACL checks if requested. 
The only time no ACL diff --git a/vault/capabilities_test.go b/vault/capabilities_test.go index 40111af0a248..368478838381 100644 --- a/vault/capabilities_test.go +++ b/vault/capabilities_test.go @@ -69,7 +69,7 @@ path "secret/sample" { entityID := resp.Data["id"].(string) // Create a token for the entity and assign policy2 on the token - ent := &TokenEntry{ + ent := &logical.TokenEntry{ ID: "capabilitiestoken", Path: "secret/sample", Policies: []string{"policy2"}, @@ -135,7 +135,7 @@ func TestCapabilities(t *testing.T) { } // Create a token for the policy - ent := &TokenEntry{ + ent := &logical.TokenEntry{ ID: "capabilitiestoken", Path: "testpath", Policies: []string{"dev"}, diff --git a/vault/core.go b/vault/core.go index 76107f90236d..51075ae3e96c 100644 --- a/vault/core.go +++ b/vault/core.go @@ -191,7 +191,7 @@ type Core struct { standbyDoneCh chan struct{} standbyStopCh chan struct{} manualStepDownCh chan struct{} - keepHALockOnStepDown uint32 + keepHALockOnStepDown *uint32 heldHALock physical.Lock // unlockInfo has the keys provided to Unseal until the threshold number of parts is available, as well as the operation nonce @@ -500,6 +500,7 @@ func NewCore(conf *CoreConfig) (*Core, error) { localClusterCert: new(atomic.Value), localClusterParsedCert: new(atomic.Value), activeNodeReplicationState: new(uint32), + keepHALockOnStepDown: new(uint32), } atomic.StoreUint32(c.replicationState, uint32(consts.ReplicationDRDisabled|consts.ReplicationPerformanceDisabled)) @@ -516,7 +517,10 @@ func NewCore(conf *CoreConfig) (*Core, error) { } // Load CORS config and provide a value for the core field. - c.corsConfig = &CORSConfig{core: c} + c.corsConfig = &CORSConfig{ + core: c, + Enabled: new(uint32), + } phys := conf.Physical _, txnOK := conf.Physical.(physical.Transactional) @@ -1138,7 +1142,7 @@ func (c *Core) sealInternal(keepLock bool) error { } } else { if keepLock { - atomic.StoreUint32(&c.keepHALockOnStepDown, 1) + atomic.StoreUint32(c.keepHALockOnStepDown, 1) } // If we are trying to acquire the lock, force it to return with nil so // runStandby will exit @@ -1150,7 +1154,7 @@ func (c *Core) sealInternal(keepLock bool) error { // Wait for runStandby to stop <-c.standbyDoneCh - atomic.StoreUint32(&c.keepHALockOnStepDown, 0) + atomic.StoreUint32(c.keepHALockOnStepDown, 0) c.logger.Debug("runStandby done") } diff --git a/vault/core_test.go b/vault/core_test.go index efb37cb4d6c5..ff28434745b9 100644 --- a/vault/core_test.go +++ b/vault/core_test.go @@ -276,7 +276,7 @@ func TestCore_Seal_BadToken(t *testing.T) { // GH-3497 func TestCore_Seal_SingleUse(t *testing.T) { c, keys, _ := TestCoreUnsealed(t) - c.tokenStore.create(context.Background(), &TokenEntry{ + c.tokenStore.create(context.Background(), &logical.TokenEntry{ ID: "foo", NumUses: 1, Policies: []string{"root"}, @@ -719,7 +719,7 @@ func TestCore_HandleLogin_Token(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - expect := &TokenEntry{ + expect := &logical.TokenEntry{ ID: clientToken, Accessor: te.Accessor, Parent: "", @@ -1022,7 +1022,7 @@ func TestCore_HandleRequest_CreateToken_Lease(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - expect := &TokenEntry{ + expect := &logical.TokenEntry{ ID: clientToken, Accessor: te.Accessor, Parent: root, @@ -1067,7 +1067,7 @@ func TestCore_HandleRequest_CreateToken_NoDefaultPolicy(t *testing.T) { if err != nil { t.Fatalf("err: %v", err) } - expect := &TokenEntry{ + expect := &logical.TokenEntry{ ID: clientToken, Accessor: te.Accessor, Parent: root, diff --git a/vault/cors.go 
b/vault/cors.go index 6b0920a73b6b..c389a6e6d2be 100644 --- a/vault/cors.go +++ b/vault/cors.go @@ -32,7 +32,7 @@ var StdAllowedHeaders = []string{ type CORSConfig struct { sync.RWMutex `json:"-"` core *Core - Enabled uint32 `json:"enabled"` + Enabled *uint32 `json:"enabled"` AllowedOrigins []string `json:"allowed_origins,omitempty"` AllowedHeaders []string `json:"allowed_headers,omitempty"` } @@ -40,8 +40,9 @@ type CORSConfig struct { func (c *Core) saveCORSConfig(ctx context.Context) error { view := c.systemBarrierView.SubView("config/") + enabled := atomic.LoadUint32(c.corsConfig.Enabled) localConfig := &CORSConfig{ - Enabled: atomic.LoadUint32(&c.corsConfig.Enabled), + Enabled: &enabled, } c.corsConfig.RLock() localConfig.AllowedOrigins = c.corsConfig.AllowedOrigins @@ -78,6 +79,11 @@ func (c *Core) loadCORSConfig(ctx context.Context) error { if err != nil { return err } + + if newConfig.Enabled == nil { + newConfig.Enabled = new(uint32) + } + newConfig.core = c c.corsConfig = newConfig @@ -109,19 +115,19 @@ func (c *CORSConfig) Enable(ctx context.Context, urls []string, headers []string } c.Unlock() - atomic.StoreUint32(&c.Enabled, CORSEnabled) + atomic.StoreUint32(c.Enabled, CORSEnabled) return c.core.saveCORSConfig(ctx) } // IsEnabled returns the value of CORSConfig.isEnabled func (c *CORSConfig) IsEnabled() bool { - return atomic.LoadUint32(&c.Enabled) == CORSEnabled + return atomic.LoadUint32(c.Enabled) == CORSEnabled } // Disable sets CORS to disabled and clears the allowed origins & headers. func (c *CORSConfig) Disable(ctx context.Context) error { - atomic.StoreUint32(&c.Enabled, CORSDisabled) + atomic.StoreUint32(c.Enabled, CORSDisabled) c.Lock() c.AllowedOrigins = nil diff --git a/vault/expiration.go b/vault/expiration.go index 66b069d47734..29edadcfbb48 100644 --- a/vault/expiration.go +++ b/vault/expiration.go @@ -66,9 +66,9 @@ type ExpirationManager struct { pending map[string]*time.Timer pendingLock sync.RWMutex - tidyLock int32 + tidyLock *int32 - restoreMode int32 + restoreMode *int32 restoreModeLock sync.RWMutex restoreRequestLock sync.RWMutex restoreLocks []*locksutil.LockEntry @@ -77,7 +77,7 @@ type ExpirationManager struct { coreStateLock *sync.RWMutex quitContext context.Context - leaseCheckCounter uint32 + leaseCheckCounter *uint32 logLeaseExpirations bool } @@ -92,19 +92,21 @@ func NewExpirationManager(c *Core, view *BarrierView, logger log.Logger) *Expira tokenStore: c.tokenStore, logger: logger, pending: make(map[string]*time.Timer), + tidyLock: new(int32), // new instances of the expiration manager will go immediately into // restore mode - restoreMode: 1, + restoreMode: new(int32), restoreLocks: locksutil.CreateLocks(), quitCh: make(chan struct{}), coreStateLock: &c.stateLock, quitContext: c.activeContext, - leaseCheckCounter: 0, + leaseCheckCounter: new(uint32), logLeaseExpirations: os.Getenv("VAULT_SKIP_LOGGING_LEASE_EXPIRATIONS") == "", } + *exp.restoreMode = 1 if exp.logger == nil { opts := log.LoggerOptions{Name: "expiration_manager"} @@ -168,7 +170,7 @@ func (m *ExpirationManager) unlockLease(leaseID string) { // inRestoreMode returns if we are currently in restore mode func (m *ExpirationManager) inRestoreMode() bool { - return atomic.LoadInt32(&m.restoreMode) == 1 + return atomic.LoadInt32(m.restoreMode) == 1 } // Tidy cleans up the dangling storage entries for leases. 
It scans the storage @@ -184,12 +186,12 @@ func (m *ExpirationManager) Tidy() error { var tidyErrors *multierror.Error - if !atomic.CompareAndSwapInt32(&m.tidyLock, 0, 1) { + if !atomic.CompareAndSwapInt32(m.tidyLock, 0, 1) { m.logger.Warn("tidy operation on leases is already in progress") return fmt.Errorf("tidy operation on leases is already in progress") } - defer atomic.CompareAndSwapInt32(&m.tidyLock, 1, 0) + defer atomic.CompareAndSwapInt32(m.tidyLock, 1, 0) m.logger.Info("beginning tidy operation on leases") defer m.logger.Info("finished tidy operation on leases") @@ -294,7 +296,7 @@ func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { // if restore mode finished successfully, restore mode was already // disabled with the lock. In an error state, this will allow the // Stop() function to shut everything down. - atomic.StoreInt32(&m.restoreMode, 0) + atomic.StoreInt32(m.restoreMode, 0) switch { case retErr == nil: @@ -409,7 +411,7 @@ func (m *ExpirationManager) Restore(errorFunc func()) (retErr error) { m.restoreModeLock.Lock() m.restoreLoaded = sync.Map{} m.restoreLocks = nil - atomic.StoreInt32(&m.restoreMode, 0) + atomic.StoreInt32(m.restoreMode, 0) m.restoreModeLock.Unlock() m.logger.Info("lease restore complete") @@ -557,7 +559,7 @@ func (m *ExpirationManager) RevokePrefix(prefix string) error { // This is done by using the secondary index. It also removes the lease entry // for the token itself. As a result it should *ONLY* ever be called from the // token store's revokeSalted function. -func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error { +func (m *ExpirationManager) RevokeByToken(te *logical.TokenEntry) error { defer metrics.MeasureSince([]string{"expire", "revoke-by-token"}, time.Now()) // Lookup the leases @@ -580,11 +582,16 @@ func (m *ExpirationManager) RevokeByToken(te *TokenEntry) error { if le != nil { le.ExpireTime = time.Now() - if err := m.persistEntry(le); err != nil { - return err - } + { + m.pendingLock.Lock() + if err := m.persistEntry(le); err != nil { + m.pendingLock.Unlock() + return err + } - m.updatePending(le, 0) + m.updatePendingInternal(le, 0) + m.pendingLock.Unlock() + } } } @@ -707,12 +714,18 @@ func (m *ExpirationManager) Renew(leaseID string, increment time.Duration) (*log le.Secret = resp.Secret le.ExpireTime = resp.Secret.ExpirationTime() le.LastRenewalTime = time.Now() - if err := m.persistEntry(le); err != nil { - return nil, err - } - // Update the expiration time - m.updatePending(le, resp.Secret.LeaseTotal()) + { + m.pendingLock.Lock() + if err := m.persistEntry(le); err != nil { + m.pendingLock.Unlock() + return nil, err + } + + // Update the expiration time + m.updatePendingInternal(le, resp.Secret.LeaseTotal()) + m.pendingLock.Unlock() + } // Return the response return resp, nil @@ -782,12 +795,18 @@ func (m *ExpirationManager) RenewToken(req *logical.Request, source string, toke le.Auth = resp.Auth le.ExpireTime = resp.Auth.ExpirationTime() le.LastRenewalTime = time.Now() - if err := m.persistEntry(le); err != nil { - return nil, err - } - // Update the expiration time - m.updatePending(le, resp.Auth.LeaseTotal()) + { + m.pendingLock.Lock() + if err := m.persistEntry(le); err != nil { + m.pendingLock.Unlock() + return nil, err + } + + // Update the expiration time + m.updatePendingInternal(le, resp.Auth.LeaseTotal()) + m.pendingLock.Unlock() + } retResp.Auth = resp.Auth return retResp, nil @@ -963,7 +982,10 @@ func (m *ExpirationManager) FetchLeaseTimes(leaseID string) (*leaseEntry, error) func (m 
*ExpirationManager) updatePending(le *leaseEntry, leaseTotal time.Duration) { m.pendingLock.Lock() defer m.pendingLock.Unlock() + m.updatePendingInternal(le, leaseTotal) +} +func (m *ExpirationManager) updatePendingInternal(le *leaseEntry, leaseTotal time.Duration) { // Check for an existing timer timer, ok := m.pending[le.LeaseID] @@ -1227,7 +1249,7 @@ func (m *ExpirationManager) removeIndexByToken(token, leaseID string) error { // CreateOrFetchRevocationLeaseByToken is used to create or fetch the matching // leaseID for a particular token. The lease is set to expire immediately after // it's created. -func (m *ExpirationManager) CreateOrFetchRevocationLeaseByToken(te *TokenEntry) (string, error) { +func (m *ExpirationManager) CreateOrFetchRevocationLeaseByToken(te *logical.TokenEntry) (string, error) { // Fetch the saltedID of the token and construct the leaseID saltedID, err := m.tokenStore.SaltID(m.quitContext, te.ID) if err != nil { @@ -1311,11 +1333,11 @@ func (m *ExpirationManager) emitMetrics() { metrics.SetGauge([]string{"expire", "num_leases"}, float32(num)) // Check if lease count is greater than the threshold if num > maxLeaseThreshold { - if atomic.LoadUint32(&m.leaseCheckCounter) > 59 { + if atomic.LoadUint32(m.leaseCheckCounter) > 59 { m.logger.Warn("lease count exceeds warning lease threshold") - atomic.StoreUint32(&m.leaseCheckCounter, 0) + atomic.StoreUint32(m.leaseCheckCounter, 0) } else { - atomic.AddUint32(&m.leaseCheckCounter, 1) + atomic.AddUint32(m.leaseCheckCounter, 1) } } } diff --git a/vault/expiration_test.go b/vault/expiration_test.go index e0650f17defd..e0b93ec43bc8 100644 --- a/vault/expiration_test.go +++ b/vault/expiration_test.go @@ -736,7 +736,7 @@ func TestExpiration_RevokeByToken(t *testing.T) { } // Should nuke all the keys - te := &TokenEntry{ + te := &logical.TokenEntry{ ID: "foobarbaz", } if err := exp.RevokeByToken(te); err != nil { @@ -823,7 +823,7 @@ func TestExpiration_RevokeByToken_Blocking(t *testing.T) { } // Should nuke all the keys - te := &TokenEntry{ + te := &logical.TokenEntry{ ID: "foobarbaz", } if err := exp.RevokeByToken(te); err != nil { @@ -899,7 +899,7 @@ func TestExpiration_RenewToken(t *testing.T) { func TestExpiration_RenewToken_period(t *testing.T) { exp := mockExpiration(t) - root := &TokenEntry{ + root := &logical.TokenEntry{ Policies: []string{"root"}, Path: "auth/token/root", DisplayName: "root", diff --git a/vault/ha.go b/vault/ha.go index 8f08982435af..6850cb235a1e 100644 --- a/vault/ha.go +++ b/vault/ha.go @@ -29,6 +29,12 @@ func (c *Core) Standby() (bool, error) { // Leader is used to get the current active leader func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err error) { + // Check if HA enabled. 
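// A simplified sketch of the locking pattern the ExpirationManager changes
// above introduce: updatePending keeps its lock-taking behaviour for existing
// callers, while code that must persist an entry and refresh its timer does
// both under a single pendingLock acquisition via updatePendingInternal. All
// names other than updatePending/updatePendingInternal are illustrative.
package main

import (
	"sync"
	"time"
)

type manager struct {
	pendingLock sync.Mutex
	pending     map[string]*time.Timer
}

// persistEntry stands in for writing the lease entry to storage.
func (m *manager) persistEntry(id string) error { return nil }

// updatePending is the lock-taking wrapper kept for existing callers.
func (m *manager) updatePending(id string, d time.Duration) {
	m.pendingLock.Lock()
	defer m.pendingLock.Unlock()
	m.updatePendingInternal(id, d)
}

// updatePendingInternal assumes pendingLock is already held.
func (m *manager) updatePendingInternal(id string, d time.Duration) {
	if t, ok := m.pending[id]; ok {
		t.Reset(d)
		return
	}
	m.pending[id] = time.AfterFunc(d, func() { /* expire the lease */ })
}

// renew mirrors the pattern from the diff: persist and reschedule without
// letting a concurrent pending-map update interleave between the two steps.
func (m *manager) renew(id string, d time.Duration) error {
	m.pendingLock.Lock()
	defer m.pendingLock.Unlock()
	if err := m.persistEntry(id); err != nil {
		return err
	}
	m.updatePendingInternal(id, d)
	return nil
}

func main() {
	m := &manager{pending: make(map[string]*time.Timer)}
	_ = m.renew("lease/1", time.Second)
}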
We don't need the lock for this check as it's set + // on startup and never modified + if c.ha == nil { + return false, "", "", ErrHANotEnabled + } + c.stateLock.RLock() defer c.stateLock.RUnlock() @@ -37,11 +43,6 @@ func (c *Core) Leader() (isLeader bool, leaderAddr, clusterAddr string, err erro return false, "", "", consts.ErrSealed } - // Check if HA enabled - if c.ha == nil { - return false, "", "", ErrHANotEnabled - } - // Check if we are the leader if !c.standby { return true, c.redirectAddr, c.clusterAddr, nil @@ -419,7 +420,7 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { case <-stopCh: // This case comes from sealInternal; we will already be having the // state lock held so we do toggle grabStateLock to false - if atomic.LoadUint32(&c.keepHALockOnStepDown) == 1 { + if atomic.LoadUint32(c.keepHALockOnStepDown) == 1 { releaseHALock = false } grabStateLock = false @@ -466,13 +467,13 @@ func (c *Core) runStandby(doneCh, manualStepDownCh, stopCh chan struct{}) { // the result. func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) { defer close(doneCh) - var opCount int32 + opCount := new(int32) for { select { case <-time.After(leaderCheckInterval): - count := atomic.AddInt32(&opCount, 1) + count := atomic.AddInt32(opCount, 1) if count > 1 { - atomic.AddInt32(&opCount, -1) + atomic.AddInt32(opCount, -1) continue } // We do this in a goroutine because otherwise if this refresh is @@ -480,7 +481,7 @@ func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) { // deadlock, which then means stopCh can never been seen and we can // block shutdown go func() { - defer atomic.AddInt32(&opCount, -1) + defer atomic.AddInt32(opCount, -1) c.Leader() }() case <-stopCh: @@ -492,18 +493,18 @@ func (c *Core) periodicLeaderRefresh(doneCh, stopCh chan struct{}) { // periodicCheckKeyUpgrade is used to watch for key rotation events as a standby func (c *Core) periodicCheckKeyUpgrade(ctx context.Context, doneCh, stopCh chan struct{}) { defer close(doneCh) - var opCount int32 + opCount := new(int32) for { select { case <-time.After(keyRotateCheckInterval): - count := atomic.AddInt32(&opCount, 1) + count := atomic.AddInt32(opCount, 1) if count > 1 { - atomic.AddInt32(&opCount, -1) + atomic.AddInt32(opCount, -1) continue } go func() { - defer atomic.AddInt32(&opCount, -1) + defer atomic.AddInt32(opCount, -1) // Only check if we are a standby c.stateLock.RLock() standby := c.standby diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index 8cae25f55e2e..ae44ae02f56f 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -29,7 +29,7 @@ func TestIdentityStore_EntityIDPassthrough(t *testing.T) { } // Create a token with the above created entity set on it - ent := &TokenEntry{ + ent := &logical.TokenEntry{ ID: "testtokenid", Path: "test", Policies: []string{"root"}, @@ -221,7 +221,7 @@ func TestIdentityStore_WrapInfoInheritance(t *testing.T) { // Create a token which has EntityID set and has update permissions to // sys/wrapping/wrap - te := &TokenEntry{ + te := &logical.TokenEntry{ Path: "test", Policies: []string{"default", responseWrappingPolicyName}, EntityID: entityID, @@ -260,7 +260,7 @@ func TestIdentityStore_TokenEntityInheritance(t *testing.T) { ts := c.tokenStore // Create a token which has EntityID set - te := &TokenEntry{ + te := &logical.TokenEntry{ Path: "test", Policies: []string{"dev", "prod"}, EntityID: "testentityid", diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go index 
6b203f0294c6..cdf61e2d2724 100644 --- a/vault/logical_system_test.go +++ b/vault/logical_system_test.go @@ -1283,7 +1283,7 @@ func TestSystemBackend_revokePrefixAuth_newUrl(t *testing.T) { exp := ts.expiration - te := &TokenEntry{ + te := &logical.TokenEntry{ ID: "foo", Path: "auth/github/login/bar", TTL: time.Hour, @@ -1346,7 +1346,7 @@ func TestSystemBackend_revokePrefixAuth_origUrl(t *testing.T) { exp := ts.expiration - te := &TokenEntry{ + te := &logical.TokenEntry{ ID: "foo", Path: "auth/github/login/bar", TTL: time.Hour, diff --git a/vault/request_forwarding.go b/vault/request_forwarding.go index 472ed6cc4075..c4c0e73cd977 100644 --- a/vault/request_forwarding.go +++ b/vault/request_forwarding.go @@ -79,7 +79,7 @@ func (c *Core) startForwarding(ctx context.Context) error { fws := &http2.Server{} // Shutdown coordination logic - var shutdown uint32 + shutdown := new(uint32) shutdownWg := &sync.WaitGroup{} for _, addr := range c.clusterListenerAddrs { @@ -120,7 +120,7 @@ func (c *Core) startForwarding(ctx context.Context) error { } for { - if atomic.LoadUint32(&shutdown) > 0 { + if atomic.LoadUint32(shutdown) > 0 { return } @@ -213,7 +213,7 @@ func (c *Core) startForwarding(ctx context.Context) error { // Set the shutdown flag. This will cause the listeners to shut down // within the deadline in clusterListenerAcceptDeadline - atomic.StoreUint32(&shutdown, 1) + atomic.StoreUint32(shutdown, 1) c.logger.Info("forwarding rpc listeners stopped") // Wait for them all to shut down diff --git a/vault/request_handling.go b/vault/request_handling.go index cfde6f4858ab..df22ab727a50 100644 --- a/vault/request_handling.go +++ b/vault/request_handling.go @@ -77,7 +77,7 @@ func (c *Core) fetchEntityAndDerivedPolicies(entityID string) (*identity.Entity, return entity, policies, err } -func (c *Core) fetchACLTokenEntryAndEntity(req *logical.Request) (*ACL, *TokenEntry, *identity.Entity, error) { +func (c *Core) fetchACLTokenEntryAndEntity(req *logical.Request) (*ACL, *logical.TokenEntry, *identity.Entity, error) { defer metrics.MeasureSince([]string{"core", "fetch_acl_and_token"}, time.Now()) // Ensure there is a client token @@ -91,10 +91,17 @@ func (c *Core) fetchACLTokenEntryAndEntity(req *logical.Request) (*ACL, *TokenEn } // Resolve the token policy - te, err := c.tokenStore.Lookup(c.activeContext, req.ClientToken) - if err != nil { - c.logger.Error("failed to lookup token", "error", err) - return nil, nil, nil, ErrInternalError + var te *logical.TokenEntry + switch req.TokenEntry() { + case nil: + var err error + te, err = c.tokenStore.Lookup(c.activeContext, req.ClientToken) + if err != nil { + c.logger.Error("failed to lookup token", "error", err) + return nil, nil, nil, ErrInternalError + } + default: + te = req.TokenEntry() } // Ensure the token is valid @@ -142,11 +149,11 @@ func (c *Core) fetchACLTokenEntryAndEntity(req *logical.Request) (*ACL, *TokenEn return acl, te, entity, nil } -func (c *Core) checkToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *TokenEntry, error) { +func (c *Core) checkToken(ctx context.Context, req *logical.Request, unauth bool) (*logical.Auth, *logical.TokenEntry, error) { defer metrics.MeasureSince([]string{"core", "check_token"}, time.Now()) var acl *ACL - var te *TokenEntry + var te *logical.TokenEntry var entity *identity.Entity var err error @@ -783,7 +790,7 @@ func (c *Core) handleLoginRequest(ctx context.Context, req *logical.Request) (re } // Generate a token - te := TokenEntry{ + te := logical.TokenEntry{ Path: req.Path, 
Policies: auth.Policies, Meta: auth.Metadata, diff --git a/vault/router.go b/vault/router.go index 249cce305d0a..476f25afa50b 100644 --- a/vault/router.go +++ b/vault/router.go @@ -485,6 +485,9 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc } } + reqTokenEntry := req.TokenEntry() + req.SetTokenEntry(nil) + // Reset the request before returning defer func() { req.Path = originalPath @@ -506,6 +509,8 @@ func (r *Router) routeCommon(ctx context.Context, req *logical.Request, existenc req.MountAccessor = re.mountEntry.Accessor req.EntityID = originalEntityID + + req.SetTokenEntry(reqTokenEntry) }() // Invoke the backend diff --git a/vault/router_test.go b/vault/router_test.go index 929ece733982..b992419f700b 100644 --- a/vault/router_test.go +++ b/vault/router_test.go @@ -31,6 +31,10 @@ type NoopBackend struct { } func (n *NoopBackend) HandleRequest(ctx context.Context, req *logical.Request) (*logical.Response, error) { + if req.TokenEntry() != nil { + panic("got a non-nil TokenEntry") + } + var err error resp := n.Response if n.RequestHandler != nil { @@ -171,6 +175,9 @@ func TestRouter_Mount(t *testing.T) { req := &logical.Request{ Path: "prod/aws/foo", } + req.SetTokenEntry(&logical.TokenEntry{ + ID: "foo", + }) resp, err := r.Route(context.Background(), req) if err != nil { t.Fatalf("err: %v", err) @@ -178,6 +185,9 @@ func TestRouter_Mount(t *testing.T) { if resp != nil { t.Fatalf("bad: %v", resp) } + if req.TokenEntry() == nil || req.TokenEntry().ID != "foo" { + t.Fatalf("unexpected value for token entry: %v", req.TokenEntry()) + } // Verify the path if len(n.Paths) != 1 || n.Paths[0] != "foo" { diff --git a/vault/token_store.go b/vault/token_store.go index 349a8696d1d2..d986a79f528e 100644 --- a/vault/token_store.go +++ b/vault/token_store.go @@ -79,7 +79,7 @@ var ( // is particularly useful to fetch the accessor of the client token and get it // populated in the logical request along with the client token. The accessor // of the client token can get audit logged. 
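// A minimal sketch of the token-entry caching added above: the HTTP handler
// stores the looked-up entry on the request, core request handling reuses it
// instead of hitting the token store again, and the router clears it before a
// backend sees the request. The types and helper names here are illustrative
// stand-ins, not the real vault packages.
package main

import "fmt"

type TokenEntry struct{ ID string }

type Request struct {
	ClientToken string
	tokenEntry  *TokenEntry
}

func (r *Request) TokenEntry() *TokenEntry      { return r.tokenEntry }
func (r *Request) SetTokenEntry(te *TokenEntry) { r.tokenEntry = te }

// lookupToken stands in for the comparatively expensive token store lookup.
func lookupToken(id string) (*TokenEntry, error) {
	return &TokenEntry{ID: id}, nil
}

// resolveTokenEntry mirrors the pattern in fetchACLTokenEntryAndEntity: use
// the cached entry when the HTTP layer already resolved it, otherwise look it
// up in the token store.
func resolveTokenEntry(req *Request) (*TokenEntry, error) {
	if te := req.TokenEntry(); te != nil {
		return te, nil
	}
	return lookupToken(req.ClientToken)
}

func main() {
	req := &Request{ClientToken: "example-token"}
	req.SetTokenEntry(&TokenEntry{ID: "example-token"}) // done by the HTTP handler
	te, _ := resolveTokenEntry(req)
	fmt.Println(te.ID)

	// Before routing to a backend, the router stashes and clears the entry so
	// backends never observe it (router_test.go above panics if one does).
	saved := req.TokenEntry()
	req.SetTokenEntry(nil)
	// ... invoke backend ...
	req.SetTokenEntry(saved)
}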
-func (c *Core) LookupToken(token string) (*TokenEntry, error) { +func (c *Core) LookupToken(token string) (*logical.TokenEntry, error) { if token == "" { return nil, fmt.Errorf("missing client token") } @@ -130,7 +130,7 @@ type TokenStore struct { saltLock sync.RWMutex salt *salt.Salt - tidyLock int64 + tidyLock *int32 identityPoliciesDeriverFunc func(string) (*identity.Entity, []string, error) } @@ -150,6 +150,7 @@ func NewTokenStore(ctx context.Context, logger log.Logger, c *Core, config *logi tokensPendingDeletion: &sync.Map{}, saltLock: sync.RWMutex{}, identityPoliciesDeriverFunc: c.fetchEntityAndDerivedPolicies, + tidyLock: new(int32), } if c.policyStore != nil { @@ -551,118 +552,6 @@ func (ts *TokenStore) Salt(ctx context.Context) (*salt.Salt, error) { return salt, nil } -// TokenEntry is used to represent a given token -type TokenEntry struct { - // ID of this entry, generally a random UUID - ID string `json:"id" mapstructure:"id" structs:"id" sentinel:""` - - // Accessor for this token, a random UUID - Accessor string `json:"accessor" mapstructure:"accessor" structs:"accessor" sentinel:""` - - // Parent token, used for revocation trees - Parent string `json:"parent" mapstructure:"parent" structs:"parent" sentinel:""` - - // Which named policies should be used - Policies []string `json:"policies" mapstructure:"policies" structs:"policies"` - - // Used for audit trails, this is something like "auth/user/login" - Path string `json:"path" mapstructure:"path" structs:"path"` - - // Used for auditing. This could include things like "source", "user", "ip" - Meta map[string]string `json:"meta" mapstructure:"meta" structs:"meta" sentinel:"meta"` - - // Used for operators to be able to associate with the source - DisplayName string `json:"display_name" mapstructure:"display_name" structs:"display_name"` - - // Used to restrict the number of uses (zero is unlimited). This is to - // support one-time-tokens (generalized). There are a few special values: - // if it's -1 it has run through its use counts and is executing its final - // use; if it's -2 it is tainted, which means revocation is currently - // running on it; and if it's -3 it's also tainted but revocation - // previously ran and failed, so this hints the tidy function to try it - // again. - NumUses int `json:"num_uses" mapstructure:"num_uses" structs:"num_uses"` - - // Time of token creation - CreationTime int64 `json:"creation_time" mapstructure:"creation_time" structs:"creation_time" sentinel:""` - - // Duration set when token was created - TTL time.Duration `json:"ttl" mapstructure:"ttl" structs:"ttl" sentinel:""` - - // Explicit maximum TTL on the token - ExplicitMaxTTL time.Duration `json:"explicit_max_ttl" mapstructure:"explicit_max_ttl" structs:"explicit_max_ttl" sentinel:""` - - // If set, the role that was used for parameters at creation time - Role string `json:"role" mapstructure:"role" structs:"role"` - - // If set, the period of the token. This is only used when created directly - // through the create endpoint; periods managed by roles or other auth - // backends are subject to those renewal rules. 
- Period time.Duration `json:"period" mapstructure:"period" structs:"period" sentinel:""` - - // These are the deprecated fields - DisplayNameDeprecated string `json:"DisplayName" mapstructure:"DisplayName" structs:"DisplayName" sentinel:""` - NumUsesDeprecated int `json:"NumUses" mapstructure:"NumUses" structs:"NumUses" sentinel:""` - CreationTimeDeprecated int64 `json:"CreationTime" mapstructure:"CreationTime" structs:"CreationTime" sentinel:""` - ExplicitMaxTTLDeprecated time.Duration `json:"ExplicitMaxTTL" mapstructure:"ExplicitMaxTTL" structs:"ExplicitMaxTTL" sentinel:""` - - EntityID string `json:"entity_id" mapstructure:"entity_id" structs:"entity_id"` - - // The set of CIDRs that this token can be used with - BoundCIDRs []*sockaddr.SockAddrMarshaler `json:"bound_cidrs"` -} - -func (te *TokenEntry) SentinelGet(key string) (interface{}, error) { - if te == nil { - return nil, nil - } - switch key { - case "period": - return te.Period, nil - - case "period_seconds": - return int64(te.Period.Seconds()), nil - - case "explicit_max_ttl": - return te.ExplicitMaxTTL, nil - - case "explicit_max_ttl_seconds": - return int64(te.ExplicitMaxTTL.Seconds()), nil - - case "creation_ttl": - return te.TTL, nil - - case "creation_ttl_seconds": - return int64(te.TTL.Seconds()), nil - - case "creation_time": - return time.Unix(te.CreationTime, 0).Format(time.RFC3339Nano), nil - - case "creation_time_unix": - return time.Unix(te.CreationTime, 0), nil - - case "meta", "metadata": - return te.Meta, nil - } - - return nil, nil -} - -func (te *TokenEntry) SentinelKeys() []string { - return []string{ - "period", - "period_seconds", - "explicit_max_ttl", - "explicit_max_ttl_seconds", - "creation_ttl", - "creation_ttl_seconds", - "creation_time", - "creation_time_unix", - "meta", - "metadata", - } -} - // tsRoleEntry contains token store role information type tsRoleEntry struct { // The name of the role. Embedded so it can be used for pathing @@ -720,8 +609,8 @@ func (ts *TokenStore) SaltID(ctx context.Context, id string) (string, error) { } // RootToken is used to generate a new token with root privileges and no parent -func (ts *TokenStore) rootToken(ctx context.Context) (*TokenEntry, error) { - te := &TokenEntry{ +func (ts *TokenStore) rootToken(ctx context.Context) (*logical.TokenEntry, error) { + te := &logical.TokenEntry{ Policies: []string{"root"}, Path: "auth/token/root", DisplayName: "root", @@ -763,7 +652,7 @@ func (ts *TokenStore) tokenStoreAccessorList(ctx context.Context, req *logical.R // createAccessor is used to create an identifier for the token ID. // A storage index, mapping the accessor to the token ID is also created. -func (ts *TokenStore) createAccessor(ctx context.Context, entry *TokenEntry) error { +func (ts *TokenStore) createAccessor(ctx context.Context, entry *logical.TokenEntry) error { defer metrics.MeasureSince([]string{"token", "createAccessor"}, time.Now()) // Create a random accessor @@ -798,7 +687,7 @@ func (ts *TokenStore) createAccessor(ctx context.Context, entry *TokenEntry) err // Create is used to create a new token entry. The entry is assigned // a newly generated ID if not provided. 
-func (ts *TokenStore) create(ctx context.Context, entry *TokenEntry) error { +func (ts *TokenStore) create(ctx context.Context, entry *logical.TokenEntry) error { defer metrics.MeasureSince([]string{"token", "create"}, time.Now()) // Generate an ID if necessary if entry.ID == "" { @@ -830,14 +719,14 @@ func (ts *TokenStore) create(ctx context.Context, entry *TokenEntry) error { // Store is used to store an updated token entry without writing the // secondary index. -func (ts *TokenStore) store(ctx context.Context, entry *TokenEntry) error { +func (ts *TokenStore) store(ctx context.Context, entry *logical.TokenEntry) error { defer metrics.MeasureSince([]string{"token", "store"}, time.Now()) return ts.storeCommon(ctx, entry, false) } // storeCommon handles the actual storage of an entry, possibly generating // secondary indexes -func (ts *TokenStore) storeCommon(ctx context.Context, entry *TokenEntry, writeSecondary bool) error { +func (ts *TokenStore) storeCommon(ctx context.Context, entry *logical.TokenEntry, writeSecondary bool) error { saltedID, err := ts.SaltID(ctx, entry.ID) if err != nil { return err @@ -895,7 +784,7 @@ func (ts *TokenStore) storeCommon(ctx context.Context, entry *TokenEntry, writeS // locking here isn't perfect, as other parts of the code may update an entry, // but usually none after the entry is already created...so this is pretty // good. -func (ts *TokenStore) UseToken(ctx context.Context, te *TokenEntry) (*TokenEntry, error) { +func (ts *TokenStore) UseToken(ctx context.Context, te *logical.TokenEntry) (*logical.TokenEntry, error) { if te == nil { return nil, fmt.Errorf("invalid token entry provided for use count decrementing") } @@ -955,7 +844,7 @@ func (ts *TokenStore) UseToken(ctx context.Context, te *TokenEntry) (*TokenEntry return te, nil } -func (ts *TokenStore) UseTokenByID(ctx context.Context, id string) (*TokenEntry, error) { +func (ts *TokenStore) UseTokenByID(ctx context.Context, id string) (*logical.TokenEntry, error) { te, err := ts.Lookup(ctx, id) if err != nil { return te, err @@ -965,7 +854,7 @@ func (ts *TokenStore) UseTokenByID(ctx context.Context, id string) (*TokenEntry, } // Lookup is used to find a token given its ID. It acquires a read lock, then calls lookupSalted. -func (ts *TokenStore) Lookup(ctx context.Context, id string) (*TokenEntry, error) { +func (ts *TokenStore) Lookup(ctx context.Context, id string) (*logical.TokenEntry, error) { defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now()) if id == "" { return nil, fmt.Errorf("cannot lookup blank token") @@ -984,7 +873,7 @@ func (ts *TokenStore) Lookup(ctx context.Context, id string) (*TokenEntry, error // lookupTainted is used to find a token that may or maynot be tainted given its // ID. It acquires a read lock, then calls lookupSalted. -func (ts *TokenStore) lookupTainted(ctx context.Context, id string) (*TokenEntry, error) { +func (ts *TokenStore) lookupTainted(ctx context.Context, id string) (*logical.TokenEntry, error) { defer metrics.MeasureSince([]string{"token", "lookup"}, time.Now()) if id == "" { return nil, fmt.Errorf("cannot lookup blank token") @@ -1004,7 +893,7 @@ func (ts *TokenStore) lookupTainted(ctx context.Context, id string) (*TokenEntry // lookupSalted is used to find a token given its salted ID. 
If tainted is // true, entries that are in some revocation state (currently, indicated by num // uses < 0), the entry will be returned anyways -func (ts *TokenStore) lookupSalted(ctx context.Context, saltedID string, tainted bool) (*TokenEntry, error) { +func (ts *TokenStore) lookupSalted(ctx context.Context, saltedID string, tainted bool) (*logical.TokenEntry, error) { // Lookup token path := lookupPrefix + saltedID raw, err := ts.view.Get(ctx, path) @@ -1018,7 +907,7 @@ func (ts *TokenStore) lookupSalted(ctx context.Context, saltedID string, tainted } // Unmarshal the token - entry := new(TokenEntry) + entry := new(logical.TokenEntry) if err := jsonutil.DecodeJSON(raw.Value, entry); err != nil { return nil, errwrap.Wrapf("failed to decode entry: {{err}}", err) } @@ -1063,6 +952,20 @@ func (ts *TokenStore) lookupSalted(ctx context.Context, saltedID string, tainted persistNeeded = true } + // It's a root token with unlimited creation TTL (so never had an + // expiration); this may or may not have a lease (based on when it was + // generated, for later revocation purposes) but it doesn't matter, it's + // allowed. Fast-path this. + if len(entry.Policies) == 1 && entry.Policies[0] == "root" && entry.TTL == 0 { + // If fields are getting upgraded, store the changes + if persistNeeded { + if err := ts.store(ctx, entry); err != nil { + return nil, errwrap.Wrapf("failed to persist token upgrade: {{err}}", err) + } + } + return entry, nil + } + // Perform these checks on upgraded fields, but before persisting // If we are still restoring the expiration manager, we want to ensure the @@ -1075,16 +978,9 @@ func (ts *TokenStore) lookupSalted(ctx context.Context, saltedID string, tainted return nil, errwrap.Wrapf("failed to fetch lease times: {{err}}", err) } - var ret *TokenEntry + var ret *logical.TokenEntry switch { - // It's a root token with unlimited creation TTL (so never had an - // expiration); this may or may not have a lease (based on when it was - // generated, for later revocation purposes) but it doesn't matter, it's - // allowed - case len(entry.Policies) == 1 && entry.Policies[0] == "root" && entry.TTL == 0: - ret = entry - // It's any kind of expiring token with no lease, immediately delete it case le == nil: leaseID, err := ts.expiration.CreateOrFetchRevocationLeaseByToken(entry) @@ -1396,12 +1292,12 @@ func (ts *TokenStore) lookupBySaltedAccessor(ctx context.Context, saltedAccessor func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { var tidyErrors *multierror.Error - if !atomic.CompareAndSwapInt64(&ts.tidyLock, 0, 1) { + if !atomic.CompareAndSwapInt32(ts.tidyLock, 0, 1) { ts.logger.Warn("tidy operation on tokens is already in progress") return nil, fmt.Errorf("tidy operation on tokens is already in progress") } - defer atomic.CompareAndSwapInt64(&ts.tidyLock, 1, 0) + defer atomic.CompareAndSwapInt32(ts.tidyLock, 1, 0) ts.logger.Info("beginning tidy operation on tokens") defer ts.logger.Info("finished tidy operation on tokens") @@ -1551,10 +1447,10 @@ func (ts *TokenStore) handleTidy(ctx context.Context, req *logical.Request, data if te == nil { ts.logger.Info("deleting token with nil entry", "salted_token", saltedID) - // RevokeByToken expects a '*TokenEntry'. For the + // RevokeByToken expects a '*logical.TokenEntry'. For the // purposes of tidying, it is sufficient if the token // entry only has ID set. 
- tokenEntry := &TokenEntry{ + tokenEntry := &logical.TokenEntry{ ID: accessorEntry.TokenID, } @@ -1754,7 +1650,7 @@ func (ts *TokenStore) handleCreateCommon(ctx context.Context, req *logical.Reque } // Setup the token entry - te := TokenEntry{ + te := logical.TokenEntry{ Parent: req.ClientToken, // The mount point is always the same since we have only one token diff --git a/vault/token_store_test.go b/vault/token_store_test.go index 21b188fbfdb9..c20c36b0c066 100644 --- a/vault/token_store_test.go +++ b/vault/token_store_test.go @@ -108,7 +108,7 @@ func TestTokenStore_TokenEntryUpgrade(t *testing.T) { } // Test the default case to ensure there are no regressions - ent := &TokenEntry{ + ent := &logical.TokenEntry{ DisplayName: "test-display-name", Path: "test", Policies: []string{"dev", "ops"}, @@ -152,7 +152,7 @@ func TestTokenStore_TokenEntryUpgrade(t *testing.T) { } // Fill in the deprecated fields and read out from proper fields - ent = &TokenEntry{ + ent = &logical.TokenEntry{ Path: "test", Policies: []string{"dev", "ops"}, DisplayNameDeprecated: "test-display-name", @@ -196,7 +196,7 @@ func TestTokenStore_TokenEntryUpgrade(t *testing.T) { } // Check if NumUses picks up a lower value - ent = &TokenEntry{ + ent = &logical.TokenEntry{ Path: "test", NumUses: 5, NumUsesDeprecated: 10, @@ -229,7 +229,7 @@ func TestTokenStore_TokenEntryUpgrade(t *testing.T) { // Switch the values from deprecated and proper field and check if the // lower value is still getting picked up - ent = &TokenEntry{ + ent = &logical.TokenEntry{ Path: "test", NumUses: 10, NumUsesDeprecated: 5, @@ -301,7 +301,7 @@ func testMakeTokenViaRequest(t testing.TB, ts *TokenStore, req *logical.Request) return resp } -func testMakeTokenDirectly(t testing.TB, ts *TokenStore, te *TokenEntry) { +func testMakeTokenDirectly(t testing.TB, ts *TokenStore, te *logical.TokenEntry) { if err := ts.create(context.Background(), te); err != nil { t.Fatal(err) } @@ -346,7 +346,7 @@ func TestTokenStore_AccessorIndex(t *testing.T) { c, _, _ := TestCoreUnsealed(t) ts := c.tokenStore - ent := &TokenEntry{ + ent := &logical.TokenEntry{ Path: "test", Policies: []string{"dev", "ops"}, TTL: time.Hour, @@ -603,7 +603,7 @@ func TestTokenStore_CreateLookup(t *testing.T) { c, _, _ := TestCoreUnsealed(t) ts := c.tokenStore - ent := &TokenEntry{ + ent := &logical.TokenEntry{ Path: "test", Policies: []string{"dev", "ops"}, TTL: time.Hour, @@ -642,7 +642,7 @@ func TestTokenStore_CreateLookup_ProvidedID(t *testing.T) { c, _, _ := TestCoreUnsealed(t) ts := c.tokenStore - ent := &TokenEntry{ + ent := &logical.TokenEntry{ ID: "foobarbaz", Path: "test", Policies: []string{"dev", "ops"}, @@ -685,7 +685,7 @@ func TestTokenStore_CreateLookup_ExpirationInRestoreMode(t *testing.T) { c, _, _ := TestCoreUnsealed(t) ts := c.tokenStore - ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} + ent := &logical.TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} if err := ts.create(context.Background(), ent); err != nil { t.Fatalf("err: %v", err) } @@ -733,7 +733,7 @@ func TestTokenStore_CreateLookup_ExpirationInRestoreMode(t *testing.T) { // Reset expiration manager to restore mode ts.expiration.restoreModeLock.Lock() - atomic.StoreInt32(&ts.expiration.restoreMode, 1) + atomic.StoreInt32(ts.expiration.restoreMode, 1) ts.expiration.restoreLocks = locksutil.CreateLocks() ts.expiration.restoreModeLock.Unlock() @@ -778,7 +778,7 @@ func TestTokenStore_UseToken(t *testing.T) { } // Create a restricted token - ent = &TokenEntry{ + ent = &logical.TokenEntry{ 
Path: "test", Policies: []string{"dev", "ops"}, NumUses: 2, @@ -835,7 +835,7 @@ func TestTokenStore_Revoke(t *testing.T) { c, _, _ := TestCoreUnsealed(t) ts := c.tokenStore - ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} + ent := &logical.TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} if err := ts.create(context.Background(), ent); err != nil { t.Fatalf("err: %v", err) } @@ -871,7 +871,7 @@ func TestTokenStore_Revoke_Leases(t *testing.T) { t.Fatal(err) } - ent := &TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} + ent := &logical.TokenEntry{Path: "test", Policies: []string{"dev", "ops"}} if err := ts.create(context.Background(), ent); err != nil { t.Fatalf("err: %v", err) } @@ -920,14 +920,14 @@ func TestTokenStore_Revoke_Orphan(t *testing.T) { c, _, _ := TestCoreUnsealed(t) ts := c.tokenStore - ent := &TokenEntry{ + ent := &logical.TokenEntry{ Path: "test", Policies: []string{"dev", "ops"}, TTL: time.Hour, } testMakeTokenDirectly(t, ts, ent) - ent2 := &TokenEntry{ + ent2 := &logical.TokenEntry{ Parent: ent.ID, TTL: time.Hour, } @@ -1006,24 +1006,24 @@ func BenchmarkTokenStore_RevokeTree(b *testing.B) { // Builds a TokenTree of a specified depth, so that // we may run revoke tests on it. -func buildTokenTree(t testing.TB, ts *TokenStore, depth uint64) (root *TokenEntry, children []*TokenEntry) { - root = &TokenEntry{ +func buildTokenTree(t testing.TB, ts *TokenStore, depth uint64) (root *logical.TokenEntry, children []*logical.TokenEntry) { + root = &logical.TokenEntry{ TTL: time.Hour, } testMakeTokenDirectly(t, ts, root) - frontier := []*TokenEntry{root} + frontier := []*logical.TokenEntry{root} current := uint64(0) for current < depth { - next := make([]*TokenEntry, 0, 2*len(frontier)) + next := make([]*logical.TokenEntry, 0, 2*len(frontier)) for _, node := range frontier { - left := &TokenEntry{ + left := &logical.TokenEntry{ Parent: node.ID, TTL: time.Hour, } testMakeTokenDirectly(t, ts, left) - right := &TokenEntry{ + right := &logical.TokenEntry{ Parent: node.ID, TTL: time.Hour, } @@ -1043,24 +1043,24 @@ func TestTokenStore_RevokeSelf(t *testing.T) { exp := mockExpiration(t) ts := exp.tokenStore - ent1 := &TokenEntry{ + ent1 := &logical.TokenEntry{ TTL: time.Hour, } testMakeTokenDirectly(t, ts, ent1) - ent2 := &TokenEntry{ + ent2 := &logical.TokenEntry{ Parent: ent1.ID, TTL: time.Hour, } testMakeTokenDirectly(t, ts, ent2) - ent3 := &TokenEntry{ + ent3 := &logical.TokenEntry{ Parent: ent2.ID, TTL: time.Hour, } testMakeTokenDirectly(t, ts, ent3) - ent4 := &TokenEntry{ + ent4 := &logical.TokenEntry{ Parent: ent2.ID, TTL: time.Hour, } @@ -1075,7 +1075,7 @@ func TestTokenStore_RevokeSelf(t *testing.T) { } lookup := []string{ent1.ID, ent2.ID, ent3.ID, ent4.ID} - var out *TokenEntry + var out *logical.TokenEntry for _, id := range lookup { var found bool for i := 0; i < 10; i++ { @@ -1135,7 +1135,7 @@ func TestTokenStore_HandleRequest_CreateToken_DisplayName(t *testing.T) { t.Fatalf("err: %v\nresp: %#v", err, resp) } - expected := &TokenEntry{ + expected := &logical.TokenEntry{ ID: resp.Auth.ClientToken, Accessor: resp.Auth.Accessor, Parent: root, @@ -1167,7 +1167,7 @@ func TestTokenStore_HandleRequest_CreateToken_NumUses(t *testing.T) { t.Fatalf("err: %v\nresp: %#v", err, resp) } - expected := &TokenEntry{ + expected := &logical.TokenEntry{ ID: resp.Auth.ClientToken, Accessor: resp.Auth.Accessor, Parent: root, @@ -1234,7 +1234,7 @@ func TestTokenStore_HandleRequest_CreateToken_NoPolicy(t *testing.T) { t.Fatalf("err: %v\nresp: %#v", err, resp) } - 
expected := &TokenEntry{ + expected := &logical.TokenEntry{ ID: resp.Auth.ClientToken, Accessor: resp.Auth.Accessor, Parent: root, diff --git a/vault/wrapping.go b/vault/wrapping.go index 6c4361370720..5f2b59d5c528 100644 --- a/vault/wrapping.go +++ b/vault/wrapping.go @@ -109,7 +109,7 @@ DONELISTHANDLING: // wrapping token ID in the audit logs, so that it can be determined from // the audit logs whether the token was ever actually used. creationTime := time.Now() - te := TokenEntry{ + te := logical.TokenEntry{ Path: req.Path, Policies: []string{"response-wrapping"}, CreationTime: creationTime.Unix(), diff --git a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go index aa81126de32a..e251add7077f 100644 --- a/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go +++ b/vendor/github.com/hashicorp/vault-plugin-secrets-kv/backend.go @@ -168,6 +168,13 @@ func (b *versionedKVBackend) upgradeDone(ctx context.Context, s logical.Storage) func pathInvalid(b *versionedKVBackend) []*framework.Path { handler := func(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { + switch req.Path { + case "metadata", "data", "delete", "undelete", "destroy": + resp := &logical.Response{} + resp.AddWarning("Non-listing operations on the root of a K/V v2 mount are not supported.") + return logical.RespondWithStatusCode(resp, req, http.StatusNotFound) + } + var subCommand string switch req.Operation { case logical.CreateOperation, logical.UpdateOperation: diff --git a/vendor/vendor.json b/vendor/vendor.json index 85bdb74f91fc..0c8051b6f1d3 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -1363,10 +1363,10 @@ "revisionTime": "2018-06-06T02:42:09Z" }, { - "checksumSHA1": "zyIz4QL1DddbGjsP6W2qbf3Uh28=", + "checksumSHA1": "grTzIH3YAjsrME6m9IBXpS77W14=", "path": "github.com/hashicorp/vault-plugin-secrets-kv", - "revision": "730a019907e33c96abded11bbbd9aadf6e5b97d3", - "revisionTime": "2018-06-06T02:56:43Z" + "revision": "f34be853827f3b0511169188ff1db08adacfd3ac", + "revisionTime": "2018-06-08T17:47:30Z" }, { "checksumSHA1": "vTfeYxi0Z1y176bjQaYh1/FpQ9s=", diff --git a/website/source/docs/auth/gcp.html.md b/website/source/docs/auth/gcp.html.md index cc91c36c5d3a..cc99f2c75573 100644 --- a/website/source/docs/auth/gcp.html.md +++ b/website/source/docs/auth/gcp.html.md @@ -379,13 +379,13 @@ If a GCE token is provided for login under an `iam` role, the service account as #### Enable GCP authentication in Vault -``` +```sh $ vault auth enable gcp ``` #### Configure the GCP Auth Method -``` +```sh $ vault write auth/gcp/config credentials=@path/to/creds.json ``` @@ -400,7 +400,7 @@ to learn more about parameters. #### Create a role -``` +```sh $ vault write auth/gcp/role/dev-role \ type="iam" \ project_id="project-123456" \ @@ -416,7 +416,7 @@ configuration for the generated auth tokens. We also expose a helper path for updating the service accounts attached to an existing `iam` role: -``` +```sh $ vault write auth/gcp/role/iam-role/service-accounts \ add='serviceAccountToAdd,...' \ remove='serviceAccountToRemove,...' \ @@ -424,7 +424,7 @@ $ vault write auth/gcp/role/iam-role/service-accounts \ and for updating the labels attached to an existing `gce` role: -``` +```sh $ vault write auth/gcp/role/gce-role/labels \ add='label1:value1,foo:bar,...' \ remove='key1,key2,...' \ @@ -439,7 +439,7 @@ parameters for role creation and updates. 
Once the backend is set up and roles are registered with the backend, the user can log in against a specific role. -``` +```sh $ vault write auth/gcp/login role='dev-role' jwt='eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9...' ``` @@ -452,20 +452,20 @@ authenticating entity. #### Enable GCP authentication in Vault -``` +```sh $ curl $VAULT_ADDR/v1/sys/auth/gcp -d '{ "type": "gcp" }' ``` #### Configure the GCP Auth Method -``` +```sh $ curl $VAULT_ADDR/v1/auth/gcp/config \ -d '{ "credentials": "{...}" }' ``` #### Create a role -``` +```sh $ curl $VAULT_ADDR/v1/auth/gcp/role/dev-role \ -d '{ "type": "iam", "project_id": "project-123456", ...}' ``` @@ -479,7 +479,7 @@ If you have mounted the `gcp` backend with a different mountpoint, use that valu The `role` and `jwt` should be sent in the POST body encoded as JSON. -``` +```sh $ curl $VAULT_ADDR/v1/auth/gcp/login \ -d '{ "role": "dev-role", "jwt": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9..." }' ``` diff --git a/website/source/docs/configuration/seal/azurekeyvault.html.md b/website/source/docs/configuration/seal/azurekeyvault.html.md new file mode 100644 index 000000000000..af275b4fb078 --- /dev/null +++ b/website/source/docs/configuration/seal/azurekeyvault.html.md @@ -0,0 +1,92 @@ +--- +layout: "docs" +page_title: "Azure Key Vault - Seals - Configuration" +sidebar_current: "docs-configuration-seal-azurekeyvault" +description: |- + The Azure Key Vault seal configures Vault to use Azure Key Vault as the seal wrapping + mechanism. +--- + +# `azurekeyvault` Seal + +The Azure Key Vault seal configures Vault to use Azure Key Vault as the seal +wrapping mechanism. Vault Enterprise's Azure Key Vault seal is activated by one of +the following: + +* The presence of a `seal "azurekeyvault"` block in Vault's configuration file. +* The presence of the environment variable `VAULT_SEAL_TYPE` set to `azurekeyvault`. + If enabling via environment variable, all other required values specific to + Key Vault (i.e. `VAULT_AZUREKEYVAULT_VAULT_NAME`, etc.) must also be supplied, as + well as all other Azure-related environment variables that lend to successful + authentication (i.e. `AZURE_TENANT_ID`, etc.). + +## `azurekeyvault` Example + +This example shows configuring the Azure Key Vault seal through the Vault +configuration file by providing all the required values: + +```hcl +seal "azurekeyvault" { + tenant_id = "46646709-b63e-4747-be42-516edeaf1e14" + client_id = "03dc33fc-16d9-4b77-8152-3ec568f8af6e" + client_secret = "DUJDS3..." + vault_name = "hc-vault" + key_name = "vault_key" +} +``` + +## `azurekeyvault` Parameters + +These parameters apply to the `seal` stanza in the Vault configuration file: + +- `tenant_id` `(string: )`: The tenant id for the Azure Active Directory organization. May + also be specified by the `AZURE_TENANT_ID` environment variable. + +- `client_id` `(string: )`: The client id for credentials to query the Azure APIs. + May also be specified by the `AZURE_CLIENT_ID` environment variable. + +- `client_secret` `(string: )`: The client secret for credentials to query the Azure APIs. + May also be specified by the `AZURE_CLIENT_SECRET` environment variable. + +- `environment` `(string: "AZUREPUBLICCLOUD")`: The Azure Cloud environment API endpoints to use. May also + be specified by the `AZURE_ENVIRONMENT` environment variable. + +- `vault_name` `(string: )`: The Key Vault vault whose keys are used for encryption and + decryption. May also be specified by the `VAULT_AZUREKEYVAULT_VAULT_NAME` environment variable.
+ +- `key_name` `(string: )`: The Key Vault key to use for encryption and decryption. May also be specified by the + `VAULT_AZUREKEYVAULT_KEY_NAME` environment variable. + +## Authentication + +Authentication-related values must be provided, either as environment +variables or as configuration parameters. + +```text +Azure authentication values: + +* `AZURE_TENANT_ID` +* `AZURE_CLIENT_ID` +* `AZURE_CLIENT_SECRET` +* `AZURE_ENVIRONMENT` +``` + +Note: If Vault is hosted on Azure, Vault can use Managed Service Identities (MSI) to access Azure instead of an environment and +shared client id and secret. MSI must be [enabled](https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/qs-configure-portal-windows-vm) +on the VMs hosting Vault. + + +## `azurekeyvault` Environment Variables + +Alternatively, the Azure Key Vault seal can be activated by providing the following +environment variables: + +```text +* `VAULT_AZUREKEYVAULT_VAULT_NAME` +* `VAULT_AZUREKEYVAULT_KEY_NAME` +``` + +## Key Rotation + +This seal supports rotating keys defined in Azure Key Vault. Key metadata is stored with the +encrypted data to ensure the correct key is used during decryption operations. \ No newline at end of file diff --git a/website/source/docs/configuration/seal/gcpckms.html.md b/website/source/docs/configuration/seal/gcpckms.html.md index 2d7f036031af..53007d1c578f 100644 --- a/website/source/docs/configuration/seal/gcpckms.html.md +++ b/website/source/docs/configuration/seal/gcpckms.html.md @@ -60,7 +60,7 @@ These parameters apply to the `seal` stanza in the Vault configuration file: ## Authentication -Authentication-related values must be provided, either as enviroment +Authentication-related values must be provided, either as environment variables or as configuration parameters. ```text @@ -93,4 +93,4 @@ This seal supports rotating keys defined in Google Cloud KMS [doc](https://cloud.google.com/kms/docs/rotating-keys). Both scheduled rotation and manual rotation is supported for CKMS since the key information. Old keys version must not be disabled or deleted and are used to decrypt older data. Any new or updated data will be -encrypted with the primate key version. \ No newline at end of file +encrypted with the primary key version. diff --git a/website/source/intro/getting-started/dynamic-secrets.html.md b/website/source/intro/getting-started/dynamic-secrets.html.md index 6a8cdfb9a187..c6970313db0b 100644 --- a/website/source/intro/getting-started/dynamic-secrets.html.md +++ b/website/source/intro/getting-started/dynamic-secrets.html.md @@ -60,7 +60,7 @@ these credentials when communicating with AWS in future requests. ## Creating a Role The next step is to configure a "role". A "role" in Vault is a human-friendly -identifier to an action. Think of it like a symlink. +identifier to an action. Think of it as a symlink. Vault knows how to create an IAM user via the AWS API, but it does not know what permissions, groups, and policies you want to attach to that user. This is where diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index 9f065f634b05..7d4a2bcafbf8 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -100,6 +100,9 @@ > AWS KMS ENT + > + Azure Key Vault ENT + > GCP Cloud KMS ENT
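As a quick illustration of the environment-variable activation path described in the new `azurekeyvault` seal page above, a hypothetical startup could look like the following sketch. All values are placeholders taken from the documentation's own example, and the config file path is assumed; the HCL `seal "azurekeyvault"` stanza shown in the added page remains the canonical configuration route.

```sh
# Hypothetical sketch: activating the Azure Key Vault seal purely via
# environment variables instead of a `seal "azurekeyvault"` stanza.
# All credential values below are placeholders.
$ export VAULT_SEAL_TYPE="azurekeyvault"
$ export VAULT_AZUREKEYVAULT_VAULT_NAME="hc-vault"
$ export VAULT_AZUREKEYVAULT_KEY_NAME="vault_key"
$ export AZURE_TENANT_ID="46646709-b63e-4747-be42-516edeaf1e14"
$ export AZURE_CLIENT_ID="03dc33fc-16d9-4b77-8152-3ec568f8af6e"
$ export AZURE_CLIENT_SECRET="DUJDS3..."

# Start Vault with a configuration file that contains no seal stanza
# (path is an assumption for this example).
$ vault server -config=/etc/vault/config.hcl
```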