diff --git a/.github/actions/set-up-buf/action.yml b/.github/actions/set-up-buf/action.yml index e48f254509e3..b151a307a0e4 100644 --- a/.github/actions/set-up-buf/action.yml +++ b/.github/actions/set-up-buf/action.yml @@ -60,7 +60,7 @@ runs: fi mkdir -p tmp - ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "buf-${OS}-${ARCH}.tar.gz" -O tmp/buf.tgz -R bufbuild/buf + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "buf-${OS}-${ARCH}.tar.gz" -O tmp/buf.tgz -R bufbuild/buf pushd tmp && tar -xvf buf.tgz && popd mv tmp/buf/bin/buf "$DESTINATION" rm -rf tmp diff --git a/.github/actions/set-up-gofumpt/action.yml b/.github/actions/set-up-gofumpt/action.yml index 2d046c7cb49e..884f915c29ea 100644 --- a/.github/actions/set-up-gofumpt/action.yml +++ b/.github/actions/set-up-gofumpt/action.yml @@ -56,6 +56,6 @@ runs: export OS="darwin" fi - ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "gofumpt_*_${OS}_${ARCH}" -O gofumpt -R mvdan/gofumpt + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "gofumpt_*_${OS}_${ARCH}" -O gofumpt -R mvdan/gofumpt chmod +x gofumpt mv gofumpt "$DESTINATION" diff --git a/.github/actions/set-up-gosimports/action.yml b/.github/actions/set-up-gosimports/action.yml index 3aacd2c31b4b..06623ecba052 100644 --- a/.github/actions/set-up-gosimports/action.yml +++ b/.github/actions/set-up-gosimports/action.yml @@ -57,7 +57,7 @@ runs: fi mkdir -p tmp - ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "gosimports_*_${OS}_${ARCH}.tar.gz" -O tmp/gosimports.tgz -R rinchsan/gosimports + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "gosimports_*_${OS}_${ARCH}.tar.gz" -O tmp/gosimports.tgz -R rinchsan/gosimports pushd tmp && tar -xvf gosimports.tgz && popd mv tmp/gosimports "$DESTINATION" rm -rf tmp diff --git a/.github/actions/set-up-gotestsum/action.yml b/.github/actions/set-up-gotestsum/action.yml index e45ed9e43021..6ea84c450023 100644 --- a/.github/actions/set-up-gotestsum/action.yml +++ b/.github/actions/set-up-gotestsum/action.yml @@ -54,7 +54,7 @@ runs: fi mkdir -p tmp - ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "*${OS}_${ARCH}.tar.gz" -O tmp/gotestsum.tgz -R gotestyourself/gotestsum + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "*${OS}_${ARCH}.tar.gz" -O tmp/gotestsum.tgz -R gotestyourself/gotestsum pushd tmp && tar -xvf gotestsum.tgz && popd mv tmp/gotestsum "$DESTINATION" rm -rf tmp diff --git a/.github/actions/set-up-misspell/action.yml b/.github/actions/set-up-misspell/action.yml index 4ce499eeeeb7..4447da06adda 100644 --- a/.github/actions/set-up-misspell/action.yml +++ b/.github/actions/set-up-misspell/action.yml @@ -57,7 +57,7 @@ runs: fi mkdir -p tmp - ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "misspell_*_${OS}_${ARCH}.tar.gz" -O tmp/misspell.tgz -R golangci/misspell + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "misspell_*_${OS}_${ARCH}.tar.gz" -O tmp/misspell.tgz -R golangci/misspell pushd tmp && tar -xvf misspell.tgz && popd mv tmp/misspell_"$(echo "$VERSION" | tr -d v)"_${OS}_${ARCH}/misspell "$DESTINATION" rm -rf tmp diff --git a/.github/actions/set-up-staticcheck/action.yml b/.github/actions/set-up-staticcheck/action.yml index efd253ad054d..528474c4bdfa 100644 --- a/.github/actions/set-up-staticcheck/action.yml +++ b/.github/actions/set-up-staticcheck/action.yml @@ -57,7 +57,7 @@ runs: fi 
mkdir -p tmp - ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "staticcheck_${OS}_${ARCH}.tar.gz" -O tmp/staticcheck.tgz -R dominikh/go-tools + ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "staticcheck_${OS}_${ARCH}.tar.gz" -O tmp/staticcheck.tgz -R dominikh/go-tools pushd tmp && tar -xvf staticcheck.tgz && popd mv tmp/staticcheck/staticcheck "$DESTINATION" rm -rf tmp diff --git a/.github/scripts/retry-command.sh b/.github/scripts/retry-command.sh index 85ace489d140..76f0c902bae0 100755 --- a/.github/scripts/retry-command.sh +++ b/.github/scripts/retry-command.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash # Copyright (c) HashiCorp, Inc. # SPDX-License-Identifier: BUSL-1.1 -set -euo pipefail +set -uo pipefail tries=5 count=0 @@ -14,5 +14,5 @@ do fi ((count++)) echo "trying again, attempt $count" - sleep 2 + sleep $count done diff --git a/api/sudo_paths.go b/api/sudo_paths.go index 24beb4bb1f2a..d458cbde0f45 100644 --- a/api/sudo_paths.go +++ b/api/sudo_paths.go @@ -28,6 +28,7 @@ var sudoPaths = map[string]*regexp.Regexp{ "/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`), "/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`), "/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`), + "/sys/internal/counters/activity/export": regexp.MustCompile(`^/sys/internal/counters/activity/export$`), "/sys/leases": regexp.MustCompile(`^/sys/leases$`), // This entry is a bit wrong... sys/leases/lookup does NOT require sudo. But sys/leases/lookup/ with a trailing // slash DOES require sudo. But the part of the Vault CLI that uses this logic doesn't pass operation-appropriate diff --git a/audit/backend_file.go b/audit/backend_file.go index a1e07ef0dd72..1068cfb35765 100644 --- a/audit/backend_file.go +++ b/audit/backend_file.go @@ -76,12 +76,12 @@ func newFileBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*FileBa return nil, err } - var opt []event.Option + sinkOpts := []event.Option{event.WithLogger(conf.Logger)} if mode, ok := conf.Config[optionMode]; ok { - opt = append(opt, event.WithFileMode(mode)) + sinkOpts = append(sinkOpts, event.WithFileMode(mode)) } - err = b.configureSinkNode(conf.MountPath, filePath, cfg.requiredFormat, opt...) + err = b.configureSinkNode(conf.MountPath, filePath, cfg.requiredFormat, sinkOpts...) if err != nil { return nil, err } diff --git a/audit/backend_socket.go b/audit/backend_socket.go index 5e98b64f5426..20e58bc0e075 100644 --- a/audit/backend_socket.go +++ b/audit/backend_socket.go @@ -70,6 +70,7 @@ func newSocketBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*Sock sinkOpts := []event.Option{ event.WithSocketType(socketType), event.WithMaxDuration(writeDeadline), + event.WithLogger(conf.Logger), } err = event.ValidateOptions(sinkOpts...) diff --git a/audit/backend_syslog.go b/audit/backend_syslog.go index a55437260782..1da9fc107ea4 100644 --- a/audit/backend_syslog.go +++ b/audit/backend_syslog.go @@ -60,6 +60,7 @@ func newSyslogBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*Sysl sinkOpts := []event.Option{ event.WithFacility(facility), event.WithTag(tag), + event.WithLogger(conf.Logger), } err = event.ValidateOptions(sinkOpts...) 
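The file, socket, and syslog audit backends above each add event.WithLogger(conf.Logger) to their sink options so a sink can write to Vault's operational log when a request context has already been cancelled. A minimal sketch of how those functional options are assembled and consumed, assuming the in-repo internal/observability/event package and hashicorp/go-hclog; the facility, tag, and format values below are illustrative, not taken from this diff:

```go
// Sketch only: compiles against the in-repo event package; values are illustrative.
package main

import (
	"fmt"

	"github.com/hashicorp/go-hclog"
	"github.com/hashicorp/vault/internal/observability/event"
)

// buildSyslogSink mirrors the pattern used by newSyslogBackend: collect the
// functional options (including the operational logger), validate them, then
// construct the sink with those options.
func buildSyslogSink(logger hclog.Logger) (*event.SyslogSink, error) {
	sinkOpts := []event.Option{
		event.WithFacility("AUTH"),
		event.WithTag("vault"),
		event.WithLogger(logger), // WithLogger ignores nil loggers, so sinks simply skip operational logging
	}

	if err := event.ValidateOptions(sinkOpts...); err != nil {
		return nil, fmt.Errorf("invalid sink options: %w", err)
	}

	return event.NewSyslogSink("json", sinkOpts...)
}

func main() {
	sink, err := buildSyslogSink(hclog.Default())
	if err != nil {
		fmt.Println("could not create sink:", err)
		return
	}
	fmt.Printf("created sink: %T\n", sink)
}
```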
diff --git a/audit/broker.go b/audit/broker.go index 96cd1405f3d1..47680e7be311 100644 --- a/audit/broker.go +++ b/audit/broker.go @@ -15,7 +15,6 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/eventlogger" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internal/observability/event" "github.com/hashicorp/vault/sdk/logical" @@ -253,7 +252,7 @@ func (b *Broker) Deregister(ctx context.Context, name string) error { // LogRequest is used to ensure all the audit backends have an opportunity to // log the given request and that *at least one* succeeds. -func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret error) { +func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (retErr error) { b.RLock() defer b.RUnlock() @@ -265,18 +264,15 @@ func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret erro defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now()) defer func() { metricVal := float32(0.0) - if ret != nil { + if retErr != nil { metricVal = 1.0 } metrics.IncrCounter([]string{"audit", "log_request_failure"}, metricVal) }() - var retErr *multierror.Error - e, err := NewEvent(RequestType) if err != nil { - retErr = multierror.Append(retErr, err) - return retErr.ErrorOrNil() + return err } e.Data = in @@ -295,8 +291,7 @@ func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret erro // cancelled context and refuse to process the nodes further. ns, err := namespace.FromContext(ctx) if err != nil { - retErr = multierror.Append(retErr, fmt.Errorf("namespace missing from context: %w", err)) - return retErr.ErrorOrNil() + return fmt.Errorf("namespace missing from context: %w", err) } tempContext, auditCancel := context.WithTimeout(context.Background(), timeout) @@ -308,34 +303,38 @@ func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret erro if hasAuditPipelines(b.broker) { status, err = b.broker.Send(auditContext, event.AuditType.AsEventType(), e) if err != nil { - retErr = multierror.Append(retErr, multierror.Append(err, status.Warnings...)) - return retErr.ErrorOrNil() + return fmt.Errorf("%w: %w", err, errors.Join(status.Warnings...)) } } // Audit event ended up in at least 1 sink. if len(status.CompleteSinks()) > 0 { - return retErr.ErrorOrNil() + // We should log warnings to the operational logs regardless of whether + // we consider the overall auditing attempt to be successful. + if len(status.Warnings) > 0 { + b.logger.Error("log request underlying pipeline error(s)", "error", errors.Join(status.Warnings...)) + } + + return nil } // There were errors from inside the pipeline and we didn't write to a sink. if len(status.Warnings) > 0 { - retErr = multierror.Append(retErr, multierror.Append(errors.New("error during audit pipeline processing"), status.Warnings...)) - return retErr.ErrorOrNil() + return fmt.Errorf("error during audit pipeline processing: %w", errors.Join(status.Warnings...)) } // Handle any additional audit that is required (Enterprise/CE dependant). err = b.handleAdditionalAudit(auditContext, e) if err != nil { - retErr = multierror.Append(retErr, err) + return err } - return retErr.ErrorOrNil() + return nil } // LogResponse is used to ensure all the audit backends have an opportunity to // log the given response and that *at least one* succeeds. 
-func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret error) { +func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (retErr error) { b.RLock() defer b.RUnlock() @@ -347,18 +346,15 @@ func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret err defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now()) defer func() { metricVal := float32(0.0) - if ret != nil { + if retErr != nil { metricVal = 1.0 } metrics.IncrCounter([]string{"audit", "log_response_failure"}, metricVal) }() - var retErr *multierror.Error - e, err := NewEvent(ResponseType) if err != nil { - retErr = multierror.Append(retErr, err) - return retErr.ErrorOrNil() + return err } e.Data = in @@ -377,8 +373,7 @@ func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret err // cancelled context and refuse to process the nodes further. ns, err := namespace.FromContext(ctx) if err != nil { - retErr = multierror.Append(retErr, fmt.Errorf("namespace missing from context: %w", err)) - return retErr.ErrorOrNil() + return fmt.Errorf("namespace missing from context: %w", err) } tempContext, auditCancel := context.WithTimeout(context.Background(), timeout) @@ -390,29 +385,33 @@ func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret err if hasAuditPipelines(b.broker) { status, err = b.broker.Send(auditContext, event.AuditType.AsEventType(), e) if err != nil { - retErr = multierror.Append(retErr, multierror.Append(err, status.Warnings...)) - return retErr.ErrorOrNil() + return fmt.Errorf("%w: %w", err, errors.Join(status.Warnings...)) } } // Audit event ended up in at least 1 sink. if len(status.CompleteSinks()) > 0 { - return retErr.ErrorOrNil() + // We should log warnings to the operational logs regardless of whether + // we consider the overall auditing attempt to be successful. + if len(status.Warnings) > 0 { + b.logger.Error("log response underlying pipeline error(s)", "error", errors.Join(status.Warnings...)) + } + + return nil } // There were errors from inside the pipeline and we didn't write to a sink. if len(status.Warnings) > 0 { - retErr = multierror.Append(retErr, multierror.Append(errors.New("error during audit pipeline processing"), status.Warnings...)) - return retErr.ErrorOrNil() + return fmt.Errorf("error during audit pipeline processing: %w", errors.Join(status.Warnings...)) } // Handle any additional audit that is required (Enterprise/CE dependant). 
err = b.handleAdditionalAudit(auditContext, e) if err != nil { - retErr = multierror.Append(retErr, err) + return err } - return retErr.ErrorOrNil() + return nil } func (b *Broker) Invalidate(ctx context.Context, _ string) { diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go index a1b96ad392f1..8cfa8535cb5f 100644 --- a/builtin/logical/database/backend_test.go +++ b/builtin/logical/database/backend_test.go @@ -359,7 +359,7 @@ func TestBackend_BadConnectionString(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, _ := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, _ := postgreshelper.PrepareTestContainer(t) defer cleanup() respCheck := func(req *logical.Request) { @@ -410,7 +410,7 @@ func TestBackend_basic(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -665,7 +665,7 @@ func TestBackend_connectionCrud(t *testing.T) { dbFactory.sys = sys client := cluster.Cores[0].Client.Logical() - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Mount the database plugin. @@ -872,7 +872,7 @@ func TestBackend_roleCrud(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -1121,7 +1121,7 @@ func TestBackend_allowedRoles(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -1318,7 +1318,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go index 91737da2cf8b..41a2e99758aa 100644 --- a/builtin/logical/database/path_roles_test.go +++ b/builtin/logical/database/path_roles_test.go @@ -222,7 +222,7 @@ func TestBackend_StaticRole_Config(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -487,7 +487,7 @@ func TestBackend_StaticRole_ReadCreds(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -667,7 +667,7 @@ func TestBackend_StaticRole_Updates(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -966,7 +966,7 @@ func TestBackend_StaticRole_Role_name_check(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, 
connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go index f60491a6662c..47c768374296 100644 --- a/builtin/logical/database/rollback_test.go +++ b/builtin/logical/database/rollback_test.go @@ -44,7 +44,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) { } defer lb.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") @@ -183,7 +183,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) { } defer lb.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") @@ -291,7 +291,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) { } defer lb.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}") diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go index c9917cb37458..99fc3ddf004b 100644 --- a/builtin/logical/database/rotation_test.go +++ b/builtin/logical/database/rotation_test.go @@ -63,7 +63,7 @@ func TestBackend_StaticRole_Rotation_basic(t *testing.T) { b.schedule = &TestSchedule{} - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -274,7 +274,7 @@ func TestBackend_StaticRole_Rotation_Schedule_ErrorRecover(t *testing.T) { b.schedule = &TestSchedule{} - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) t.Cleanup(cleanup) // create the database user @@ -458,7 +458,7 @@ func TestBackend_StaticRole_Rotation_NonStaticError(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -562,7 +562,7 @@ func TestBackend_StaticRole_Rotation_Revoke_user(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -783,7 +783,7 @@ func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_newer_rotation_date(t t.Fatal("could not convert to db backend") } - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user @@ -952,7 +952,7 @@ func assertWALCount(t *testing.T, s logical.Storage, expected int, key string) { type userCreator func(t *testing.T, username, password string) func TestBackend_StaticRole_Rotation_PostgreSQL(t *testing.T) { - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() uc := userCreator(func(t *testing.T, username, 
password string) { createTestPGUser(t, connURL, username, password, testRoleStaticCreate) @@ -1246,7 +1246,7 @@ func TestBackend_StaticRole_Rotation_LockRegression(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // Configure a connection @@ -1325,7 +1325,7 @@ func TestBackend_StaticRole_Rotation_Invalid_Role(t *testing.T) { } defer b.Cleanup(context.Background()) - cleanup, connURL := postgreshelper.PrepareTestContainer(t, "") + cleanup, connURL := postgreshelper.PrepareTestContainer(t) defer cleanup() // create the database user diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go index e2130c1cf7da..b485f7ca46bb 100644 --- a/builtin/logical/database/secret_creds.go +++ b/builtin/logical/database/secret_creds.go @@ -34,6 +34,9 @@ func (b *databaseBackend) secretCredsRenew() framework.OperationFunc { return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("username not a string") + } roleNameRaw, ok := req.Secret.InternalData["role"] if !ok { @@ -98,6 +101,9 @@ func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc { return nil, fmt.Errorf("secret is missing username internal data") } username, ok := usernameRaw.(string) + if !ok { + return nil, fmt.Errorf("username not a string") + } var resp *logical.Response diff --git a/changelog/27750.txt b/changelog/27750.txt new file mode 100644 index 000000000000..04c24fe59e7f --- /dev/null +++ b/changelog/27750.txt @@ -0,0 +1,3 @@ +```release-note:bug +core/identity: Fixed an issue where deleted/reassigned entity-aliases were not removed from in-memory database. +``` diff --git a/changelog/27790.txt b/changelog/27790.txt new file mode 100644 index 000000000000..1475d0831a2b --- /dev/null +++ b/changelog/27790.txt @@ -0,0 +1,3 @@ +```release-note:change +activity (enterprise): filter all fields in client count responses by the request namespace +``` \ No newline at end of file diff --git a/changelog/27796.txt b/changelog/27796.txt new file mode 100644 index 000000000000..7a1e7ebac3b0 --- /dev/null +++ b/changelog/27796.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: simplify the date range editing experience in the client counts dashboard. +``` \ No newline at end of file diff --git a/changelog/27809.txt b/changelog/27809.txt new file mode 100644 index 000000000000..332c9155d95a --- /dev/null +++ b/changelog/27809.txt @@ -0,0 +1,3 @@ +```release-note:improvement +audit: Ensure that any underlying errors from audit devices are logged even if we consider auditing to be a success. +``` \ No newline at end of file diff --git a/changelog/27830.txt b/changelog/27830.txt new file mode 100644 index 000000000000..6a3d7e3041f7 --- /dev/null +++ b/changelog/27830.txt @@ -0,0 +1,3 @@ +```release-note:change +activity (enterprise): remove deprecated fields distinct_entities and non_entity_tokens +``` \ No newline at end of file diff --git a/changelog/27846.txt b/changelog/27846.txt new file mode 100644 index 000000000000..50cba99062fb --- /dev/null +++ b/changelog/27846.txt @@ -0,0 +1,7 @@ +```release-note:change +activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) now requires the `sudo` ACL capability.
+``` + +```release-note:improvement +activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) can now be called in non-root namespaces. Resulting records will be filtered to include the requested namespace (via `X-Vault-Namespace` header or within the path) and all child namespaces. +``` diff --git a/changelog/27859.txt b/changelog/27859.txt new file mode 100644 index 000000000000..d6836641fae7 --- /dev/null +++ b/changelog/27859.txt @@ -0,0 +1,4 @@ +```release-note:improvement +audit: sinks (file, socket, syslog) will attempt to log errors to the server operational +log before returning (if there are errors to log, and the context is done). +``` diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go index 96f5026cbb92..91e189ed2604 100644 --- a/command/agentproxyshared/auth/auth.go +++ b/command/agentproxyshared/auth/auth.go @@ -313,10 +313,11 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error { isTokenFileMethod = path == "auth/token/lookup-self" if isTokenFileMethod { token, _ := data["token"].(string) - lookupSelfClient, err := clientToUse.CloneWithHeaders() - if err != nil { + // The error is called clientErr as to not shadow the other err above it. + lookupSelfClient, clientErr := clientToUse.CloneWithHeaders() + if clientErr != nil { ah.logger.Error("failed to clone client to perform token lookup") - return err + return clientErr } lookupSelfClient.SetToken(token) secret, err = lookupSelfClient.Auth().Token().LookupSelf() diff --git a/command/operator_usage.go b/command/operator_usage.go index 117b3b78e018..199c54103663 100644 --- a/command/operator_usage.go +++ b/command/operator_usage.go @@ -132,7 +132,7 @@ func (c *OperatorUsageCommand) Run(args []string) int { c.outputTimestamps(resp.Data) out := []string{ - "Namespace path | Distinct entities | Non-Entity tokens | Secret syncs | ACME clients | Active clients", + "Namespace path | Entity Clients | Non-Entity clients | Secret syncs | ACME clients | Active clients", } out = append(out, c.namespacesOutput(resp.Data)...) 
@@ -233,14 +233,14 @@ func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageRes return ret, errors.New("missing counts") } - ret.entityCount, ok = jsonNumberOK(counts, "distinct_entities") + ret.entityCount, ok = jsonNumberOK(counts, "entity_clients") if !ok { - return ret, errors.New("missing distinct_entities") + return ret, errors.New("missing entity_clients") } - ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_tokens") + ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_clients") if !ok { - return ret, errors.New("missing non_entity_tokens") + return ret, errors.New("missing non_entity_clients") } // don't error if the secret syncs key is missing @@ -311,15 +311,15 @@ func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string return out } - entityCount, ok := jsonNumberOK(total, "distinct_entities") + entityCount, ok := jsonNumberOK(total, "entity_clients") if !ok { - c.UI.Error("missing distinct_entities in total") + c.UI.Error("missing entity_clients in total") return out } - tokenCount, ok := jsonNumberOK(total, "non_entity_tokens") + tokenCount, ok := jsonNumberOK(total, "non_entity_clients") if !ok { - c.UI.Error("missing non_entity_tokens in total") + c.UI.Error("missing non_entity_clients in total") return out } // don't error if secret syncs key is missing diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go index fa4dd38fbe77..7501d4c8622a 100644 --- a/command/pki_reissue_intermediate.go +++ b/command/pki_reissue_intermediate.go @@ -113,6 +113,10 @@ func (c *PKIReIssueCACommand) Run(args []string) int { } templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef) + if err != nil { + c.UI.Error(fmt.Sprintf("Error fetching parsing template certificate: %v", err)) + return 1 + } data := updateTemplateWithData(templateData, userData) return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) diff --git a/go.mod b/go.mod index 1b89401067ee..a3239740a117 100644 --- a/go.mod +++ b/go.mod @@ -221,7 +221,7 @@ require ( golang.org/x/text v0.16.0 golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d google.golang.org/api v0.181.0 - google.golang.org/grpc v1.64.0 + google.golang.org/grpc v1.64.1 google.golang.org/protobuf v1.34.1 gopkg.in/ory-am/dockertest.v3 v3.3.4 k8s.io/apimachinery v0.29.3 diff --git a/go.sum b/go.sum index 521c41da882c..915c5400d50e 100644 --- a/go.sum +++ b/go.sum @@ -3016,8 +3016,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g= google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git 
a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go index 1a6643cae391..2d9c2355399d 100644 --- a/helper/testhelpers/pluginhelpers/pluginhelpers.go +++ b/helper/testhelpers/pluginhelpers/pluginhelpers.go @@ -76,15 +76,17 @@ func CompilePlugin(t testing.TB, typ consts.PluginType, pluginVersion string, pl var pluginBytes []byte dir := "" - var err error pluginRootDir := "builtin" if typ == consts.PluginTypeDatabase { pluginRootDir = "plugins" } for { - dir, err = os.Getwd() - if err != nil { - t.Fatal(err) + // So that we can assign to dir without overshadowing the other + // err variables. + var getWdErr error + dir, getWdErr = os.Getwd() + if getWdErr != nil { + t.Fatal(getWdErr) } // detect if we are in a subdirectory or the root directory and compensate if _, err := os.Stat(pluginRootDir); os.IsNotExist(err) { @@ -128,15 +130,20 @@ func CompilePlugin(t testing.TB, typ consts.PluginType, pluginVersion string, pl } // write the cached plugin if necessary - if _, err := os.Stat(pluginPath); os.IsNotExist(err) { - err = os.WriteFile(pluginPath, pluginBytes, 0o755) - } - if err != nil { - t.Fatal(err) + _, statErr := os.Stat(pluginPath) + if os.IsNotExist(statErr) { + err := os.WriteFile(pluginPath, pluginBytes, 0o755) + if err != nil { + t.Fatal(err) + } + } else { + if statErr != nil { + t.Fatal(statErr) + } } sha := sha256.New() - _, err = sha.Write(pluginBytes) + _, err := sha.Write(pluginBytes) if err != nil { t.Fatal(err) } diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go index 7e5f25c626af..f0aa1203bdda 100644 --- a/helper/testhelpers/postgresql/postgresqlhelper.go +++ b/helper/testhelpers/postgresql/postgresqlhelper.go @@ -14,13 +14,29 @@ import ( "github.com/hashicorp/vault/sdk/helper/docker" ) -func PrepareTestContainer(t *testing.T, version string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=database", +const postgresVersion = "13.4-buster" + +func defaultRunOpts(t *testing.T) docker.RunOptions { + return docker.RunOptions{ + ContainerName: "postgres", + ImageRepo: "docker.mirror.hashicorp.services/postgres", + ImageTag: postgresVersion, + Env: []string{ + "POSTGRES_PASSWORD=secret", + "POSTGRES_DB=database", + }, + Ports: []string{"5432/tcp"}, + DoNotAutoRemove: false, + LogConsumer: func(s string) { + if t.Failed() { + t.Logf("container logs: %s", s) + } + }, } +} - _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) +func PrepareTestContainer(t *testing.T) (func(), string) { + _, cleanup, url, _ := prepareTestContainer(t, defaultRunOpts(t), "secret", true, false) return cleanup, url } @@ -28,64 +44,82 @@ func PrepareTestContainer(t *testing.T, version string) (func(), string) { // PrepareTestContainerWithVaultUser will setup a test container with a Vault // admin user configured so that we can safely call rotate-root without // rotating the root DB credentials -func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context, version string) (func(), string) { - env := []string{ - "POSTGRES_PASSWORD=secret", - "POSTGRES_DB=database", +func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context) (func(), string) { + runner, cleanup, url, id := prepareTestContainer(t, defaultRunOpts(t), "secret", true, false) + + cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 
'vaultpass' SUPERUSER"} + _, err := runner.RunCmdInBackground(ctx, id, cmd) + if err != nil { + t.Fatalf("Could not run command (%v) in container: %v", cmd, err) } - runner, cleanup, url, id := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env) + return cleanup, url +} + +func PrepareTestContainerWithSSL(t *testing.T, ctx context.Context, version string) (func(), string) { + runOpts := defaultRunOpts(t) + runOpts.Cmd = []string{"-c", "log_statement=all"} + runner, cleanup, url, id := prepareTestContainer(t, runOpts, "secret", true, false) + + content := "echo 'hostssl all all all cert clientcert=verify-ca' > /var/lib/postgresql/data/pg_hba.conf" + // Copy the ssl init script into the newly running container. + buildCtx := docker.NewBuildContext() + buildCtx["ssl-conf.sh"] = docker.PathContentsFromBytes([]byte(content)) + if err := runner.CopyTo(id, "/usr/local/bin", buildCtx); err != nil { + t.Fatalf("Could not copy ssl init script into container: %v", err) + } - cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"} + // run the ssl init script to overwrite the pg_hba.conf file and set it to + // require SSL for each connection + cmd := []string{"bash", "/usr/local/bin/ssl-conf.sh"} _, err := runner.RunCmdInBackground(ctx, id, cmd) if err != nil { t.Fatalf("Could not run command (%v) in container: %v", cmd, err) } + // reload so the config changes take effect + cmd = []string{"psql", "-U", "postgres", "-c", "SELECT pg_reload_conf()"} + _, err = runner.RunCmdInBackground(ctx, id, cmd) + if err != nil { + t.Fatalf("Could not run command (%v) in container: %v", cmd, err) + } + return cleanup, url } func PrepareTestContainerWithPassword(t *testing.T, version, password string) (func(), string) { - env := []string{ + runOpts := defaultRunOpts(t) + runOpts.Env = []string{ "POSTGRES_PASSWORD=" + password, "POSTGRES_DB=database", } - _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, password, true, false, false, env) + _, cleanup, url, _ := prepareTestContainer(t, runOpts, password, true, false) return cleanup, url } func PrepareTestContainerRepmgr(t *testing.T, name, version string, envVars []string) (*docker.Runner, func(), string, string) { - env := append(envVars, + runOpts := defaultRunOpts(t) + runOpts.ImageRepo = "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr" + runOpts.ImageTag = version + runOpts.Env = append(envVars, "REPMGR_PARTNER_NODES=psql-repl-node-0,psql-repl-node-1", "REPMGR_PRIMARY_HOST=psql-repl-node-0", "REPMGR_PASSWORD=repmgrpass", "POSTGRESQL_PASSWORD=secret") + runOpts.DoNotAutoRemove = true - return prepareTestContainer(t, name, "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr", version, "secret", false, true, true, env) + return prepareTestContainer(t, runOpts, "secret", false, true) } -func prepareTestContainer(t *testing.T, name, repo, version, password string, - addSuffix, forceLocalAddr, doNotAutoRemove bool, envVars []string, +func prepareTestContainer(t *testing.T, runOpts docker.RunOptions, password string, addSuffix, forceLocalAddr bool, ) (*docker.Runner, func(), string, string) { if os.Getenv("PG_URL") != "" { return nil, func() {}, "", os.Getenv("PG_URL") } - if version == "" { - version = "11" - } - - runOpts := docker.RunOptions{ - ContainerName: name, - ImageRepo: repo, - ImageTag: version, - Env: envVars, - Ports: []string{"5432/tcp"}, - 
DoNotAutoRemove: doNotAutoRemove, - } - if repo == "bitnami/postgresql-repmgr" { + if runOpts.ImageRepo == "bitnami/postgresql-repmgr" { runOpts.NetworkID = os.Getenv("POSTGRES_MULTIHOST_NET") } @@ -94,7 +128,7 @@ func prepareTestContainer(t *testing.T, name, repo, version, password string, t.Fatalf("Could not start docker Postgres: %s", err) } - svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, repo)) + svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, runOpts.ImageRepo)) if err != nil { t.Fatalf("Could not start docker Postgres: %s", err) } diff --git a/internal/observability/event/options.go b/internal/observability/event/options.go index 62fb4265954e..7e419d559516 100644 --- a/internal/observability/event/options.go +++ b/internal/observability/event/options.go @@ -6,10 +6,12 @@ package event import ( "fmt" "os" + "reflect" "strconv" "strings" "time" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-uuid" ) @@ -26,6 +28,7 @@ type options struct { withSocketType string withMaxDuration time.Duration withFileMode *os.FileMode + withLogger hclog.Logger } // getDefaultOptions returns Options with their default values. @@ -201,3 +204,15 @@ func WithFileMode(mode string) Option { return nil } } + +// WithLogger provides an Option to supply a logger which will be used to write logs. +// NOTE: If no logger is supplied then logging may not be possible. +func WithLogger(l hclog.Logger) Option { + return func(o *options) error { + if l != nil && !reflect.ValueOf(l).IsNil() { + o.withLogger = l + } + + return nil + } +} diff --git a/internal/observability/event/options_test.go b/internal/observability/event/options_test.go index a3e47a2c487c..2b6a1fe3ae8f 100644 --- a/internal/observability/event/options_test.go +++ b/internal/observability/event/options_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/hashicorp/go-hclog" "github.com/stretchr/testify/require" ) @@ -423,3 +424,37 @@ func TestOptions_WithFileMode(t *testing.T) { }) } } + +// TestOptions_WithLogger exercises WithLogger Option to ensure it performs as expected. +func TestOptions_WithLogger(t *testing.T) { + t.Parallel() + + tests := map[string]struct { + value hclog.Logger + isNilExpected bool + }{ + "nil-pointer": { + value: nil, + isNilExpected: true, + }, + "logger": { + value: hclog.NewNullLogger(), + }, + } + + for name, tc := range tests { + t.Run(name, func(t *testing.T) { + t.Parallel() + + opts := &options{} + applyOption := WithLogger(tc.value) + err := applyOption(opts) + require.NoError(t, err) + if tc.isNilExpected { + require.Nil(t, opts.withLogger) + } else { + require.NotNil(t, opts.withLogger) + } + }) + } +} diff --git a/internal/observability/event/sink_file.go b/internal/observability/event/sink_file.go index 0f5e22e4c8de..ea2047e9eb73 100644 --- a/internal/observability/event/sink_file.go +++ b/internal/observability/event/sink_file.go @@ -14,6 +14,7 @@ import ( "sync" "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" ) // defaultFileMode is the default file permissions (read/write for everyone). @@ -31,6 +32,7 @@ type FileSink struct { fileMode os.FileMode path string requiredFormat string + logger hclog.Logger } // NewFileSink should be used to create a new FileSink. 
@@ -69,6 +71,7 @@ func NewFileSink(path string, format string, opt ...Option) (*FileSink, error) { fileMode: mode, requiredFormat: format, path: p, + logger: opts.withLogger, } // Ensure that the file can be successfully opened for writing; @@ -82,13 +85,22 @@ func NewFileSink(path string, format string, opt ...Option) (*FileSink, error) { } // Process handles writing the event to the file sink. -func (s *FileSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { +func (s *FileSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { select { case <-ctx.Done(): return nil, ctx.Err() default: } + defer func() { + // If the context is errored (cancelled), and we were planning to return + // an error, let's also log (if we have a logger) in case the eventlogger's + // status channel and errors propagated. + if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil { + s.logger.Error("file sink error", "context", err, "error", retErr) + } + }() + if e == nil { return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) } diff --git a/internal/observability/event/sink_socket.go b/internal/observability/event/sink_socket.go index 7d7502306086..0761a46be886 100644 --- a/internal/observability/event/sink_socket.go +++ b/internal/observability/event/sink_socket.go @@ -12,6 +12,7 @@ import ( "time" "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-multierror" ) @@ -25,6 +26,7 @@ type SocketSink struct { maxDuration time.Duration socketLock sync.RWMutex connection net.Conn + logger hclog.Logger } // NewSocketSink should be used to create a new SocketSink. @@ -52,21 +54,28 @@ func NewSocketSink(address string, format string, opt ...Option) (*SocketSink, e maxDuration: opts.withMaxDuration, socketLock: sync.RWMutex{}, connection: nil, + logger: opts.withLogger, } return sink, nil } // Process handles writing the event to the socket. -func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { +func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { select { case <-ctx.Done(): return nil, ctx.Err() default: } - s.socketLock.Lock() - defer s.socketLock.Unlock() + defer func() { + // If the context is errored (cancelled), and we were planning to return + // an error, let's also log (if we have a logger) in case the eventlogger's + // status channel and errors propagated. + if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil { + s.logger.Error("socket sink error", "context", err, "error", retErr) + } + }() if e == nil { return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) @@ -77,6 +86,9 @@ func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (*eventl return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter) } + s.socketLock.Lock() + defer s.socketLock.Unlock() + // Try writing and return early if successful. 
err := s.write(ctx, formatted) if err == nil { diff --git a/internal/observability/event/sink_syslog.go b/internal/observability/event/sink_syslog.go index 6d6b6b6aee2f..147b87089034 100644 --- a/internal/observability/event/sink_syslog.go +++ b/internal/observability/event/sink_syslog.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/hashicorp/eventlogger" + "github.com/hashicorp/go-hclog" gsyslog "github.com/hashicorp/go-syslog" ) @@ -17,7 +18,8 @@ var _ eventlogger.Node = (*SyslogSink)(nil) // SyslogSink is a sink node which handles writing events to syslog. type SyslogSink struct { requiredFormat string - logger gsyslog.Syslogger + syslogger gsyslog.Syslogger + logger hclog.Logger } // NewSyslogSink should be used to create a new SyslogSink. @@ -38,17 +40,32 @@ func NewSyslogSink(format string, opt ...Option) (*SyslogSink, error) { return nil, fmt.Errorf("error creating syslogger: %w", err) } - return &SyslogSink{requiredFormat: format, logger: logger}, nil + syslog := &SyslogSink{ + requiredFormat: format, + syslogger: logger, + logger: opts.withLogger, + } + + return syslog, nil } // Process handles writing the event to the syslog. -func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) { +func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) { select { case <-ctx.Done(): return nil, ctx.Err() default: } + defer func() { + // If the context is errored (cancelled), and we were planning to return + // an error, let's also log (if we have a logger) in case the eventlogger's + // status channel and errors propagated. + if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil { + s.logger.Error("syslog sink error", "context", err, "error", retErr) + } + }() + if e == nil { return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter) } @@ -58,7 +75,7 @@ func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (*eventl return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter) } - _, err := s.logger.Write(formatted) + _, err := s.syslogger.Write(formatted) if err != nil { return nil, fmt.Errorf("error writing to syslog: %w", err) } diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go index 301fc15ec263..0dc0ce948602 100644 --- a/physical/postgresql/postgresql_test.go +++ b/physical/postgresql/postgresql_test.go @@ -22,7 +22,7 @@ func TestPostgreSQLBackend(t *testing.T) { // Use docker as pg backend if no url is provided via environment variables connURL := os.Getenv("PGURL") if connURL == "" { - cleanup, u := postgresql.PrepareTestContainer(t, "11.1") + cleanup, u := postgresql.PrepareTestContainer(t) defer cleanup() connURL = u } diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go index 90184e10a3eb..23b04788bbfb 100644 --- a/plugins/database/postgresql/postgresql_test.go +++ b/plugins/database/postgresql/postgresql_test.go @@ -24,7 +24,7 @@ import ( ) func getPostgreSQL(t *testing.T, options map[string]interface{}) (*PostgreSQL, func()) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) connectionDetails := map[string]interface{}{ "connection_url": connURL, @@ -70,7 +70,7 @@ func TestPostgreSQL_InitializeWithStringVals(t *testing.T) { } func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) { - cleanup, connURL := 
postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) defer cleanup() dsnConnURL, err := dbutil.ParseURL(connURL) @@ -185,7 +185,7 @@ func TestPostgreSQL_Initialize_CloudGCP(t *testing.T) { // TestPostgreSQL_PasswordAuthentication tests that the default "password_authentication" is "none", and that // an error is returned if an invalid "password_authentication" is provided. func TestPostgreSQL_PasswordAuthentication(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) defer cleanup() dsnConnURL, err := dbutil.ParseURL(connURL) @@ -227,7 +227,7 @@ func TestPostgreSQL_PasswordAuthentication(t *testing.T) { // TestPostgreSQL_PasswordAuthentication_SCRAMSHA256 tests that password_authentication works when set to scram-sha-256. // When sending an encrypted password, the raw password should still successfully authenticate the user. func TestPostgreSQL_PasswordAuthentication_SCRAMSHA256(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) defer cleanup() dsnConnURL, err := dbutil.ParseURL(connURL) @@ -1092,7 +1092,7 @@ func TestUsernameGeneration(t *testing.T) { } func TestNewUser_CustomUsername(t *testing.T) { - cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster") + cleanup, connURL := postgresql.PrepareTestContainer(t) defer cleanup() type testCase struct { diff --git a/ui/app/components/calendar-widget.js b/ui/app/components/calendar-widget.js deleted file mode 100644 index 804c9af1963a..000000000000 --- a/ui/app/components/calendar-widget.js +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import Component from '@glimmer/component'; -import { action } from '@ember/object'; -import { tracked } from '@glimmer/tracking'; -import { ARRAY_OF_MONTHS, parseAPITimestamp } from 'core/utils/date-formatters'; -import { addYears, isSameYear, subYears } from 'date-fns'; -import timestamp from 'core/utils/timestamp'; -/** - * @module CalendarWidget - * CalendarWidget component is used in the client counts dashboard to select a month/year to query the /activity endpoint. 
- * The component returns an object with selected date info, example: { dateType: 'endDate', monthIdx: 0, monthName: 'January', year: 2022 } - * - * @example - * ```js - * - * - * @param {string} startTimestamp - ISO timestamp string of the calendar widget's start time, displays in dropdown trigger - * @param {string} endTimestamp - ISO timestamp string for the calendar widget's end time, displays in dropdown trigger - * @param {function} selectMonth - callback function from parent - fires when selecting a month or clicking "Current billing period" - * /> - * ``` - */ -export default class CalendarWidget extends Component { - currentDate = timestamp.now(); - @tracked calendarDisplayDate = this.currentDate; // init to current date, updates when user clicks on calendar chevrons - @tracked showCalendar = false; - - // both date getters return a date object - get startDate() { - return parseAPITimestamp(this.args.startTimestamp); - } - get endDate() { - return parseAPITimestamp(this.args.endTimestamp); - } - get displayYear() { - return this.calendarDisplayDate.getFullYear(); - } - get disableFutureYear() { - return isSameYear(this.calendarDisplayDate, this.currentDate); - } - get disablePastYear() { - // calendar widget should only go as far back as the passed in start time - return isSameYear(this.calendarDisplayDate, this.startDate); - } - get widgetMonths() { - const startYear = this.startDate.getFullYear(); - const startMonthIdx = this.startDate.getMonth(); - return ARRAY_OF_MONTHS.map((month, index) => { - let readonly = false; - - // if widget is showing same year as @startTimestamp year, disable if month is before start month - if (startYear === this.displayYear && index < startMonthIdx) { - readonly = true; - } - - // if widget showing current year, disable if month is later than current month - if (this.displayYear === this.currentDate.getFullYear() && index > this.currentDate.getMonth()) { - readonly = true; - } - return { - index, - year: this.displayYear, - name: month, - readonly, - }; - }); - } - - @action - addYear() { - this.calendarDisplayDate = addYears(this.calendarDisplayDate, 1); - } - - @action - subYear() { - this.calendarDisplayDate = subYears(this.calendarDisplayDate, 1); - } - - @action - toggleShowCalendar() { - this.showCalendar = !this.showCalendar; - this.calendarDisplayDate = this.endDate; - } - - @action - handleDateShortcut(dropdown, { target }) { - this.args.selectMonth({ dateType: target.name }); // send clicked shortcut to parent callback - this.showCalendar = false; - dropdown.close(); - } - - @action - selectMonth(month, dropdown) { - const { index, year, name } = month; - this.toggleShowCalendar(); - this.args.selectMonth({ monthIdx: index, monthName: name, year, dateType: 'endDate' }); - dropdown.close(); - } -} diff --git a/ui/app/components/clients/date-range.hbs b/ui/app/components/clients/date-range.hbs new file mode 100644 index 000000000000..0d5ed63634bc --- /dev/null +++ b/ui/app/components/clients/date-range.hbs @@ -0,0 +1,102 @@ +{{! + Copyright (c) HashiCorp, Inc. + SPDX-License-Identifier: BUSL-1.1 +~}} + +
+ + Date range + + +
+ {{#if (and @startTime @endTime)}} +

{{this.formattedDate @startTime}}

+

+

{{this.formattedDate @endTime}}

+ + {{else}} + + {{/if}} +
+ + {{#if this.showEditModal}} + + + Edit date range + + +

+ The start date will be used as the client counting start time and all clients in that month will be considered new. + {{#if this.version.isEnterprise}} + We recommend setting this date as your license or billing start date to get the most accurate new and total + client count estimations. These dates are only for querying data in storage. Editing the date range does not + change any license or billing configurations. + {{/if}} +

+
+
+ Start + +
+
+ End + +
+ +
+ {{#if this.validationError}} + {{this.validationError}} + {{/if}} + {{#if this.useDefaultDates}} + + Dashboard will use the default date range from the API. + + {{/if}} +
+ + + + +
+ {{/if}} +
\ No newline at end of file diff --git a/ui/app/components/clients/date-range.ts b/ui/app/components/clients/date-range.ts new file mode 100644 index 000000000000..4704a7c74589 --- /dev/null +++ b/ui/app/components/clients/date-range.ts @@ -0,0 +1,125 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { action } from '@ember/object'; +import { service } from '@ember/service'; +import Component from '@glimmer/component'; +import { tracked } from '@glimmer/tracking'; +import { formatDateObject } from 'core/utils/client-count-utils'; +import { parseAPITimestamp } from 'core/utils/date-formatters'; +import timestamp from 'core/utils/timestamp'; +import { format } from 'date-fns'; +import type VersionService from 'vault/services/version'; +import type { HTMLElementEvent } from 'forms'; + +interface OnChangeParams { + start_time: number | undefined; + end_time: number | undefined; +} +interface Args { + onChange: (callback: OnChangeParams) => void; + startTime: string; + endTime: string; +} +/** + * @module ClientsDateRange + * ClientsDateRange components are used to display the current date range and provide a modal interface for editing the date range. + * + * @example + * + * + * + * @param {function} onChange - callback when a new range is saved. + * @param {string} [startTime] - ISO string timestamp of the current start date + * @param {string} [endTime] - ISO string timestamp of the current end date + */ + +export default class ClientsDateRangeComponent extends Component { + @service declare readonly version: VersionService; + + @tracked showEditModal = false; + @tracked startDate = ''; // format yyyy-MM + @tracked endDate = ''; // format yyyy-MM + currentMonth = format(timestamp.now(), 'yyyy-MM'); + + constructor(owner: unknown, args: Args) { + super(owner, args); + this.setTrackedFromArgs(); + } + + setTrackedFromArgs() { + if (this.args.startTime) { + this.startDate = parseAPITimestamp(this.args.startTime, 'yyyy-MM') as string; + } + if (this.args.endTime) { + this.endDate = parseAPITimestamp(this.args.endTime, 'yyyy-MM') as string; + } + } + + formattedDate = (isoTimestamp: string) => { + return parseAPITimestamp(isoTimestamp, 'MMMM yyyy'); + }; + + get useDefaultDates() { + return !this.startDate && !this.endDate; + } + + get validationError() { + if (this.useDefaultDates) { + // this means we want to reset, which is fine + return null; + } + if (!this.startDate || !this.endDate) { + return 'You must supply both start and end dates.'; + } + if (this.startDate > this.endDate) { + return 'Start date must be before end date.'; + } + return null; + } + + @action onClose() { + // since the component never gets torn down, we have to manually re-set this on close + this.setTrackedFromArgs(); + this.showEditModal = false; + } + + @action resetDates() { + this.startDate = ''; + this.endDate = ''; + } + + @action updateDate(evt: HTMLElementEvent) { + const { name, value } = evt.target; + if (name === 'end') { + this.endDate = value; + } else { + this.startDate = value; + } + } + + @action handleSave() { + if (this.validationError) return; + const params: OnChangeParams = { + start_time: undefined, + end_time: undefined, + }; + if (this.startDate) { + const [year, month] = this.startDate.split('-'); + if (year && month) { + params.start_time = formatDateObject({ monthIdx: parseInt(month) - 1, year: parseInt(year) }, false); + } + } + if (this.endDate) { + const [year, month] = this.endDate.split('-'); + if (year && month) { + params.end_time = 
formatDateObject({ monthIdx: parseInt(month) - 1, year: parseInt(year) }, true); + } + } + + this.args.onChange(params); + this.onClose(); + } +} diff --git a/ui/app/components/clients/page/counts.hbs b/ui/app/components/clients/page/counts.hbs index 91c401bda7b1..bb369ced1f11 100644 --- a/ui/app/components/clients/page/counts.hbs +++ b/ui/app/components/clients/page/counts.hbs @@ -18,35 +18,12 @@ Date queries are sent in UTC.

- - {{this.versionText.label}} - - -
- {{#if this.formattedStartDate}} -

{{this.formattedStartDate}}

- - {{else}} - - {{/if}} -
- - - {{this.versionText.description}} - + {{#if (eq @activity.id "no-data")}} @@ -76,11 +53,6 @@ - {{#if (or @namespace this.namespaces)}} {{/if}} {{/if}} - - -{{#if this.showBillingStartModal}} - - - Edit start month - - -

- {{this.versionText.description}} -

-

{{this.versionText.label}}

- -
- - - -
-{{/if}} \ No newline at end of file + \ No newline at end of file diff --git a/ui/app/components/clients/page/counts.ts b/ui/app/components/clients/page/counts.ts index 265c90da17db..b2cf003dddc4 100644 --- a/ui/app/components/clients/page/counts.ts +++ b/ui/app/components/clients/page/counts.ts @@ -6,10 +6,9 @@ import Component from '@glimmer/component'; import { service } from '@ember/service'; import { action } from '@ember/object'; -import { fromUnixTime, getUnixTime, isSameMonth, isAfter } from 'date-fns'; +import { fromUnixTime, isSameMonth, isAfter } from 'date-fns'; import { parseAPITimestamp } from 'core/utils/date-formatters'; -import { filterVersionHistory, formatDateObject } from 'core/utils/client-count-utils'; -import timestamp from 'core/utils/timestamp'; +import { filterVersionHistory } from 'core/utils/client-count-utils'; import type AdapterError from '@ember-data/adapter'; import type FlagsService from 'vault/services/flags'; @@ -94,17 +93,11 @@ export default class ClientsCountsPageComponent extends Component { get versionText() { return this.version.isEnterprise ? { - label: 'Billing start month', - description: - 'This date comes from your license, and defines when client counting starts. Without this starting point, the data shown is not reliable.', title: 'No billing start date found', message: 'In order to get the most from this data, please enter your billing period start month. This will ensure that the resulting data is accurate.', } : { - label: 'Client counting start date', - description: - 'This date is when client counting starts. Without this starting point, the data shown is not reliable.', title: 'No start date found', message: 'In order to get the most from this data, please enter a start month above. Vault will calculate new clients starting from that month.', @@ -174,25 +167,8 @@ export default class ClientsCountsPageComponent extends Component { } @action - onDateChange(dateObject: { dateType: string; monthIdx: number; year: number }) { - const { dateType, monthIdx, year } = dateObject; - const { config } = this.args; - const currentTimestamp = getUnixTime(timestamp.now()); - - // converts the selectedDate to unix timestamp for activity query - const selectedDate = formatDateObject({ monthIdx, year }, dateType === 'endDate'); - - if (dateType !== 'cancel') { - const start_time = { - reset: getUnixTime(config?.billingStartTimestamp) || null, // clicked 'Current billing period' in calendar widget -> resets to billing start date - currentMonth: currentTimestamp, // clicked 'Current month' from calendar widget -> defaults to currentTimestamp - startDate: selectedDate, // from "Edit billing start" modal - }[dateType]; - // endDate type is selection from calendar widget - const end_time = dateType === 'endDate' ? selectedDate : currentTimestamp; // defaults to currentTimestamp - const params = start_time !== undefined ? { start_time, end_time } : { end_time }; - this.args.onFilterChange(params); - } + onDateChange(params: { start_time: number | undefined; end_time: number | undefined }) { + this.args.onFilterChange(params); } @action diff --git a/ui/app/components/date-dropdown.js b/ui/app/components/date-dropdown.js deleted file mode 100644 index 7fee2b45510f..000000000000 --- a/ui/app/components/date-dropdown.js +++ /dev/null @@ -1,78 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: BUSL-1.1 - */ - -import Component from '@glimmer/component'; -import { action } from '@ember/object'; -import { tracked } from '@glimmer/tracking'; -import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; -import timestamp from 'core/utils/timestamp'; -/** - * @module DateDropdown - * DateDropdown components are used to display a dropdown of months and years to handle date selection. Future dates are disabled (current month and year are selectable). - * The component returns an object with selected date info, example: { dateType: 'start', monthIdx: 0, monthName: 'January', year: 2022 } - * - * @example - * ```js - * - * ``` - * @param {function} handleSubmit - callback function from parent that the date picker triggers on submit - * @param {string} [dateType] - optional argument to give the selected month/year a type - * @param {string} [submitText] - optional argument to change submit button text - * @param {function} [validateDate] - parent function to validate date selection, receives date object and returns an error message that's passed to the inline alert - */ -export default class DateDropdown extends Component { - currentDate = timestamp.now(); - currentYear = this.currentDate.getFullYear(); // integer of year - currentMonthIdx = this.currentDate.getMonth(); // integer of month, 0 indexed - dropdownMonths = ARRAY_OF_MONTHS.map((m, i) => ({ name: m, index: i })); - dropdownYears = Array.from({ length: 5 }, (item, i) => this.currentYear - i); - - @tracked maxMonthIdx = 11; // disables months with index greater than this number, initially all months are selectable - @tracked disabledYear = null; // year as integer if current year should be disabled - @tracked selectedMonth = null; - @tracked selectedYear = null; - @tracked invalidDate = null; - - @action - selectMonth(month, dropdown) { - this.selectedMonth = month; - // disable current year if selected month is later than current month - this.disabledYear = month.index > this.currentMonthIdx ? this.currentYear : null; - dropdown.close(); - } - - @action - selectYear(year, dropdown) { - this.selectedYear = year; - // disable months after current month if selected year is current year - this.maxMonthIdx = year === this.currentYear ? 
this.currentMonthIdx : 11; - dropdown.close(); - } - - @action - handleSubmit() { - if (this.args.validateDate) { - this.invalidDate = null; - this.invalidDate = this.args.validateDate(new Date(this.selectedYear, this.selectedMonth.index)); - if (this.invalidDate) return; - } - const { index, name } = this.selectedMonth; - this.args.handleSubmit({ - monthIdx: index, - monthName: name, - year: this.selectedYear, - dateType: this.args.dateType, - }); - this.resetDropdown(); - } - - resetDropdown() { - this.maxMonthIdx = 11; - this.disabledYear = null; - this.selectedMonth = null; - this.selectedYear = null; - this.invalidDate = null; - } -} diff --git a/ui/app/components/transit-edit.js b/ui/app/components/transit-edit.js index c3f34b3d8d32..7949912cac66 100644 --- a/ui/app/components/transit-edit.js +++ b/ui/app/components/transit-edit.js @@ -60,10 +60,10 @@ export default Component.extend(FocusOnInsertMixin, { models: [this.key.backend, this.key.id], query: { tab: 'details' }, }, - { label: 'edit' }, + { label: 'Edit' }, ]; } else if (this.mode === 'create') { - return [...baseCrumbs, { label: 'create' }]; + return [...baseCrumbs, { label: 'Create' }]; } return baseCrumbs; }, diff --git a/ui/app/controllers/vault/cluster/access/leases/list.js b/ui/app/controllers/vault/cluster/access/leases/list.js index 31b22f0db531..6bab2deafd50 100644 --- a/ui/app/controllers/vault/cluster/access/leases/list.js +++ b/ui/app/controllers/vault/cluster/access/leases/list.js @@ -26,7 +26,7 @@ export default Controller.extend(ListController, { backendCrumb: computed('clusterController.model.name', function () { return { - label: 'leases', + label: 'Leases', text: 'Leases', path: 'vault.cluster.access.leases.list-root', model: this.clusterController.model.name, diff --git a/ui/app/controllers/vault/cluster/access/leases/show.js b/ui/app/controllers/vault/cluster/access/leases/show.js index f8f8e7efd83f..1838ad3bdc51 100644 --- a/ui/app/controllers/vault/cluster/access/leases/show.js +++ b/ui/app/controllers/vault/cluster/access/leases/show.js @@ -13,7 +13,7 @@ export default Controller.extend({ backendCrumb: computed('clusterController.model.name', function () { return { - label: 'leases', + label: 'Leases', text: 'Leases', path: 'vault.cluster.access.leases.list-root', model: this.clusterController.model.name, diff --git a/ui/app/routes/vault/cluster/clients/counts.ts b/ui/app/routes/vault/cluster/clients/counts.ts index 3c87c2922e11..b2322970db80 100644 --- a/ui/app/routes/vault/cluster/clients/counts.ts +++ b/ui/app/routes/vault/cluster/clients/counts.ts @@ -13,8 +13,6 @@ import type StoreService from 'vault/services/store'; import type VersionService from 'vault/services/version'; import type { ModelFrom } from 'vault/vault/route'; import type ClientsRoute from '../clients'; -import type ClientsActivityModel from 'vault/models/clients/activity'; -import type ClientsConfigModel from 'vault/models/clients/config'; import type ClientsCountsController from 'vault/controllers/vault/cluster/clients/counts'; import { setStartTimeQuery } from 'core/utils/client-count-utils'; diff --git a/ui/app/routes/vault/cluster/secrets/backend/actions.js b/ui/app/routes/vault/cluster/secrets/backend/actions.js index c1b413465096..2171a8e3d6cf 100644 --- a/ui/app/routes/vault/cluster/secrets/backend/actions.js +++ b/ui/app/routes/vault/cluster/secrets/backend/actions.js @@ -47,7 +47,7 @@ export default EditBase.extend({ models: [model.secret.backend, model.secret.id], }, { - label: 'actions', + label: 'Actions', }, ]); }, diff 
--git a/ui/app/styles/components/clients-date-range.scss b/ui/app/styles/components/clients-date-range.scss new file mode 100644 index 000000000000..e908211109c5 --- /dev/null +++ b/ui/app/styles/components/clients-date-range.scss @@ -0,0 +1,14 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +.clients-date-range-display { + display: flex; + align-items: bottom; + > * { + margin-right: $spacing-8; + vertical-align: bottom; + align-self: end; + } +} diff --git a/ui/app/styles/core.scss b/ui/app/styles/core.scss index 84882a7bd34c..2d020970b73c 100644 --- a/ui/app/styles/core.scss +++ b/ui/app/styles/core.scss @@ -59,6 +59,7 @@ @import './components/box-label'; @import './components/calendar-widget'; @import './components/chart-container'; +@import './components/clients-date-range'; @import './components/cluster-banners'; @import './components/codemirror'; @import './components/console-ui-panel'; diff --git a/ui/app/templates/components/date-dropdown.hbs b/ui/app/templates/components/date-dropdown.hbs deleted file mode 100644 index d154886f6a46..000000000000 --- a/ui/app/templates/components/date-dropdown.hbs +++ /dev/null @@ -1,38 +0,0 @@ -{{! - Copyright (c) HashiCorp, Inc. - SPDX-License-Identifier: BUSL-1.1 -~}} - - - - - {{#each this.dropdownMonths as |month|}} - - {{/each}} - - - - {{#each this.dropdownYears as |year|}} - - {{/each}} - - - -{{#if this.invalidDate}} - -{{/if}} \ No newline at end of file diff --git a/ui/app/templates/components/oidc/key-form.hbs b/ui/app/templates/components/oidc/key-form.hbs index de8ca37298f1..3ac4843f2050 100644 --- a/ui/app/templates/components/oidc/key-form.hbs +++ b/ui/app/templates/components/oidc/key-form.hbs @@ -3,25 +3,6 @@ SPDX-License-Identifier: BUSL-1.1 ~}} - - - - {{#if @model.isNew}} - - {{else}} - - {{/if}} - - - - -

- {{if @model.isNew "Create" "Edit"}} - Key -

-
-
-
diff --git a/ui/app/templates/vault/cluster/access/oidc/keys/create.hbs b/ui/app/templates/vault/cluster/access/oidc/keys/create.hbs index 0349862a5182..daa63c22deaa 100644 --- a/ui/app/templates/vault/cluster/access/oidc/keys/create.hbs +++ b/ui/app/templates/vault/cluster/access/oidc/keys/create.hbs @@ -3,6 +3,20 @@ SPDX-License-Identifier: BUSL-1.1 ~}} + + + + + + + + +

+ Create Key +

+
+
+ + + + + + + + + +

+ Edit Key +

+
+ + (name ? `[data-test-date-range="${name}"]` : '[data-test-date-range]'), + set: '[data-test-set-date-range]', + edit: '[data-test-date-range-edit]', + editModal: '[data-test-date-range-edit-modal]', + editDate: (name: string) => `[data-test-date-edit="${name}"]`, + reset: '[data-test-date-edit="reset"]', + defaultRangeAlert: '[data-test-range-default-alert]', + validation: '[data-test-date-range-validation]', + }, statText: (label: string) => `[data-test-stat-text="${label}"]`, statTextValue: (label: string) => label ? `[data-test-stat-text="${label}"] .stat-value` : '[data-test-stat-text]', usageStats: (title: string) => `[data-test-usage-stats="${title}"]`, - dateDisplay: '[data-test-date-display]', attributionBlock: '[data-test-clients-attribution]', filterBar: '[data-test-clients-filter-bar]', - rangeDropdown: '[data-test-calendar-widget-trigger]', - monthDropdown: '[data-test-toggle-month]', - yearDropdown: '[data-test-toggle-year]', - currentBillingPeriod: '[data-test-current-billing-period]', - dateDropdown: { - toggleMonth: '[data-test-toggle-month]', - toggleYear: '[data-test-toggle-year]', - selectMonth: (month: string) => `[data-test-dropdown-month="${month}"]`, - selectYear: (year: string) => `[data-test-dropdown-year="${year}"]`, - submit: '[data-test-date-dropdown-submit]', - }, - calendarWidget: { - trigger: '[data-test-calendar-widget-trigger]', - currentMonth: '[data-test-current-month]', - currentBillingPeriod: '[data-test-current-billing-period]', - customEndMonth: '[data-test-show-calendar]', - previousYear: '[data-test-previous-year]', - nextYear: '[data-test-next-year]', - displayYear: '[data-test-display-year]', - calendarMonth: (month: string) => `[data-test-calendar-month="${month}"]`, - }, selectedAuthMount: 'div#mounts-search-select [data-test-selected-option] div', selectedNs: 'div#namespace-search-select [data-test-selected-option] div', upgradeWarning: '[data-test-clients-upgrade-warning]', diff --git a/ui/tests/integration/components/calendar-widget-test.js b/ui/tests/integration/components/calendar-widget-test.js deleted file mode 100644 index 46a1a190610f..000000000000 --- a/ui/tests/integration/components/calendar-widget-test.js +++ /dev/null @@ -1,240 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. 
- * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import { setupRenderingTest } from 'ember-qunit'; -import { render, click } from '@ember/test-helpers'; -import sinon from 'sinon'; -import hbs from 'htmlbars-inline-precompile'; -import calendarDropdown from 'vault/tests/pages/components/calendar-widget'; -import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; -import { subMonths, subYears } from 'date-fns'; -import timestamp from 'core/utils/timestamp'; - -module('Integration | Component | calendar-widget', function (hooks) { - setupRenderingTest(hooks); - - hooks.beforeEach(function () { - sinon.replace(timestamp, 'now', sinon.fake.returns(new Date('2018-04-03T14:15:30'))); - const CURRENT_DATE = timestamp.now(); - this.set('currentDate', CURRENT_DATE); - this.set('calendarStartDate', subMonths(CURRENT_DATE, 12)); - this.set('calendarEndDate', CURRENT_DATE); - this.set('startTimestamp', subMonths(CURRENT_DATE, 12).toISOString()); - this.set('endTimestamp', CURRENT_DATE.toISOString()); - this.set('handleClientActivityQuery', sinon.spy()); - }); - - test('it renders and disables correct months when start date is 12 months ago', async function (assert) { - assert.expect(14); - await render(hbs` - - `); - assert - .dom('[data-test-calendar-widget-trigger]') - .hasText(`Apr 2017 - Apr 2018`, 'renders and formats start and end dates'); - await calendarDropdown.openCalendar(); - assert.ok(calendarDropdown.showsCalendar, 'renders the calendar component'); - // assert months in current year are disabled/enabled correctly - const enabledMonths = ['January', 'February', 'March', 'April']; - ARRAY_OF_MONTHS.forEach(function (month) { - if (enabledMonths.includes(month)) { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); - } else { - assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); - } - }); - }); - - test('it renders and disables months before start timestamp', async function (assert) { - await render(hbs` - - `); - - await calendarDropdown.openCalendar(); - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - await calendarDropdown.clickPreviousYear(); - assert - .dom('[data-test-display-year]') - .hasText(`${subYears(this.currentDate, 1).getFullYear()}`, 'shows the previous year'); - assert.dom('[data-test-previous-year]').isDisabled('disables previous year'); - - // assert months in previous year are disabled/enabled correctly - const disabledMonths = ['January', 'February', 'March']; - ARRAY_OF_MONTHS.forEach(function (month) { - if (disabledMonths.includes(month)) { - assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); - } else { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); - } - }); - }); - - test('it calls parent callback with correct arg when clicking "Current billing period"', async function (assert) { - await render(hbs` - - `); - await calendarDropdown.menuToggle(); - await calendarDropdown.clickCurrentBillingPeriod(); - assert.propEqual( - this.handleClientActivityQuery.args[0][0], - { dateType: 'reset' }, - 'it calls parent function with reset dateType' - ); - }); - - test('it calls parent callback with correct arg when clicking "Current month"', async function (assert) { - await render(hbs` - - `); - await calendarDropdown.menuToggle(); - await calendarDropdown.clickCurrentMonth(); - assert.propEqual( - this.handleClientActivityQuery.args[0][0], - { dateType: 
'currentMonth' }, - 'it calls parent function with currentMoth dateType' - ); - }); - - test('it calls parent callback with correct arg when selecting a month', async function (assert) { - await render(hbs` - - `); - await calendarDropdown.openCalendar(); - await click(`[data-test-calendar-month="April"`); - assert.propEqual( - this.handleClientActivityQuery.lastCall.lastArg, - { - dateType: 'endDate', - monthIdx: 3, - monthName: 'April', - year: 2018, - }, - 'it calls parent function with end date (current) month/year' - ); - - await calendarDropdown.openCalendar(); - await calendarDropdown.clickPreviousYear(); - await click(`[data-test-calendar-month="May"]`); - assert.propEqual( - this.handleClientActivityQuery.lastCall.lastArg, - { - dateType: 'endDate', - monthIdx: 4, - monthName: 'May', - year: 2017, - }, - 'it calls parent function with selected start date month/year' - ); - }); - - test('it disables correct months when start date 6 months ago', async function (assert) { - this.set('calendarStartDate', subMonths(this.currentDate, 6)); // Nov 3, 2017 - this.set('startTimestamp', subMonths(this.currentDate, 6).toISOString()); - await render(hbs` - - `); - - await calendarDropdown.openCalendar(); - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - - // Check start year disables correct months - await calendarDropdown.clickPreviousYear(); - assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); - const prevYearEnabled = ['October', 'November', 'December']; - ARRAY_OF_MONTHS.forEach(function (month) { - if (prevYearEnabled.includes(month)) { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); - } else { - assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is read only`); - } - }); - - // Check end year disables correct months - await click('[data-test-next-year]'); - const currYearEnabled = ['January', 'February', 'March', 'April']; - ARRAY_OF_MONTHS.forEach(function (month) { - if (currYearEnabled.includes(month)) { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); - } else { - assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); - } - }); - }); - - test('it disables correct months when start date 36 months ago', async function (assert) { - this.set('calendarStartDate', subMonths(this.currentDate, 36)); // April 3 2015 - this.set('startTimestamp', subMonths(this.currentDate, 36).toISOString()); - await render(hbs` - - `); - - await calendarDropdown.openCalendar(); - assert.dom('[data-test-next-year]').isDisabled('Future year is disabled'); - for (const year of [2017, 2016, 2015]) { - await calendarDropdown.clickPreviousYear(); - assert.dom('[data-test-display-year]').hasText(year.toString()); - } - - assert.dom('[data-test-previous-year]').isDisabled('previous year is disabled'); - assert.dom('[data-test-next-year]').isEnabled('next year is enabled'); - - const disabledMonths = ['January', 'February', 'March']; - ARRAY_OF_MONTHS.forEach(function (month) { - if (disabledMonths.includes(month)) { - assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); - } else { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); - } - }); - - await click('[data-test-next-year]'); - ARRAY_OF_MONTHS.forEach(function (month) { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled for 2016`); - }); - await 
click('[data-test-next-year]'); - ARRAY_OF_MONTHS.forEach(function (month) { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled for 2017`); - }); - await click('[data-test-next-year]'); - - const enabledMonths = ['January', 'February', 'March', 'April']; - ARRAY_OF_MONTHS.forEach(function (month) { - if (enabledMonths.includes(month)) { - assert.dom(`[data-test-calendar-month="${month}"]`).isNotDisabled(`${month} is enabled`); - } else { - assert.dom(`[data-test-calendar-month="${month}"]`).isDisabled(`${month} is disabled`); - } - }); - }); -}); diff --git a/ui/tests/integration/components/clients/date-range-test.js b/ui/tests/integration/components/clients/date-range-test.js new file mode 100644 index 000000000000..beec2fe491f8 --- /dev/null +++ b/ui/tests/integration/components/clients/date-range-test.js @@ -0,0 +1,106 @@ +/** + * Copyright (c) HashiCorp, Inc. + * SPDX-License-Identifier: BUSL-1.1 + */ + +import { module, test } from 'qunit'; +import { setupRenderingTest } from 'vault/tests/helpers'; +import { click, fillIn, render } from '@ember/test-helpers'; +import { hbs } from 'ember-cli-htmlbars'; +import Sinon from 'sinon'; +import timestamp from 'core/utils/timestamp'; +import { GENERAL } from 'vault/tests/helpers/general-selectors'; +import { CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; + +const DATE_RANGE = CLIENT_COUNT.dateRange; +module('Integration | Component | clients/date-range', function (hooks) { + setupRenderingTest(hooks); + + hooks.beforeEach(function () { + Sinon.replace(timestamp, 'now', Sinon.fake.returns(new Date('2018-04-03T14:15:30'))); + this.now = timestamp.now(); + this.startTime = '2018-01-01T14:15:30'; + this.endTime = '2019-01-31T14:15:30'; + this.onChange = Sinon.spy(); + this.renderComponent = async () => { + await render( + hbs`` + ); + }; + }); + + test('it renders prompt to set dates if no start time', async function (assert) { + this.startTime = undefined; + await this.renderComponent(); + + assert.dom(DATE_RANGE.set).exists(); + + await click(DATE_RANGE.set); + assert.dom(DATE_RANGE.editModal).exists(); + assert.dom(DATE_RANGE.editDate('start')).hasValue(''); + await fillIn(DATE_RANGE.editDate('start'), '2018-01'); + await fillIn(DATE_RANGE.editDate('end'), '2019-01'); + await click(GENERAL.saveButton); + assert.deepEqual(this.onChange.args[0], [ + { + end_time: 1548892800, + start_time: 1514764800, + }, + ]); + assert.dom(DATE_RANGE.editModal).doesNotExist('closes modal'); + }); + + test('it renders the date range passed and can reset it', async function (assert) { + await this.renderComponent(); + + assert.dom(DATE_RANGE.dateDisplay('start')).hasText('January 2018'); + assert.dom(DATE_RANGE.dateDisplay('end')).hasText('January 2019'); + + await click(DATE_RANGE.edit); + assert.dom(DATE_RANGE.editModal).exists(); + assert.dom(DATE_RANGE.editDate('start')).hasValue('2018-01'); + assert.dom(DATE_RANGE.editDate('end')).hasValue('2019-01'); + assert.dom(DATE_RANGE.defaultRangeAlert).doesNotExist(); + + await click(DATE_RANGE.editDate('reset')); + assert.dom(DATE_RANGE.editDate('start')).hasValue(''); + assert.dom(DATE_RANGE.editDate('end')).hasValue(''); + assert.dom(DATE_RANGE.defaultRangeAlert).exists(); + await click(GENERAL.saveButton); + assert.deepEqual(this.onChange.args[0], [{ start_time: undefined, end_time: undefined }]); + }); + + test('it does not trigger onChange if date range invalid', async function (assert) { + await this.renderComponent(); + + await 
click(DATE_RANGE.edit); + await click(DATE_RANGE.editDate('reset')); + await fillIn(DATE_RANGE.editDate('end'), '2017-05'); + assert.dom(DATE_RANGE.validation).hasText('You must supply both start and end dates.'); + await click(GENERAL.saveButton); + assert.false(this.onChange.called); + + await fillIn(DATE_RANGE.editDate('start'), '2018-01'); + assert.dom(DATE_RANGE.validation).hasText('Start date must be before end date.'); + await click(GENERAL.saveButton); + assert.false(this.onChange.called); + + await click(GENERAL.cancelButton); + assert.false(this.onChange.called); + assert.dom(DATE_RANGE.editModal).doesNotExist(); + }); + + test('it resets the tracked values on close', async function (assert) { + await this.renderComponent(); + + await click(DATE_RANGE.edit); + await click(DATE_RANGE.editDate('reset')); + assert.dom(DATE_RANGE.editDate('start')).hasValue(''); + assert.dom(DATE_RANGE.editDate('end')).hasValue(''); + await click(GENERAL.cancelButton); + + await click(DATE_RANGE.edit); + assert.dom(DATE_RANGE.editDate('start')).hasValue('2018-01'); + assert.dom(DATE_RANGE.editDate('end')).hasValue('2019-01'); + }); +}); diff --git a/ui/tests/integration/components/clients/page/counts-test.js b/ui/tests/integration/components/clients/page/counts-test.js index d4a3cca34c82..4577d695c6df 100644 --- a/ui/tests/integration/components/clients/page/counts-test.js +++ b/ui/tests/integration/components/clients/page/counts-test.js @@ -6,13 +6,12 @@ import { module, test } from 'qunit'; import { setupRenderingTest } from 'ember-qunit'; import { setupMirage } from 'ember-cli-mirage/test-support'; -import { render, click, settled, findAll } from '@ember/test-helpers'; +import { render, click, findAll, fillIn } from '@ember/test-helpers'; import hbs from 'htmlbars-inline-precompile'; import clientsHandler, { LICENSE_START, STATIC_NOW } from 'vault/mirage/handlers/clients'; import { getUnixTime } from 'date-fns'; import { GENERAL } from 'vault/tests/helpers/general-selectors'; import { CLIENT_COUNT } from 'vault/tests/helpers/clients/client-count-selectors'; -import { dateDropdownSelect } from 'vault/tests/helpers/clients/client-count-helpers'; import { selectChoose } from 'ember-power-select/test-support'; import timestamp from 'core/utils/timestamp'; import sinon from 'sinon'; @@ -57,38 +56,11 @@ module('Integration | Component | clients | Page::Counts', function (hooks) { `); }); - test('it should render start date label and description based on version', async function (assert) { - const versionService = this.owner.lookup('service:version'); - - await this.renderComponent(); - - assert.dom(CLIENT_COUNT.counts.startLabel).hasText('Client counting start date', 'Label renders for OSS'); - assert - .dom(CLIENT_COUNT.counts.description) - .hasText( - 'This date is when client counting starts. Without this starting point, the data shown is not reliable.', - 'Description renders for OSS' - ); - - versionService.set('type', 'enterprise'); - await settled(); - - assert.dom(CLIENT_COUNT.counts.startLabel).hasText('Billing start month', 'Label renders for Enterprise'); - assert - .dom(CLIENT_COUNT.counts.description) - .hasText( - 'This date comes from your license, and defines when client counting starts. 
Without this starting point, the data shown is not reliable.', - 'Description renders for Enterprise' - ); - }); - test('it should populate start and end month displays', async function (assert) { await this.renderComponent(); - assert.dom(CLIENT_COUNT.counts.startMonth).hasText('July 2023', 'Start month renders'); - assert - .dom(CLIENT_COUNT.calendarWidget.trigger) - .hasText('Jul 2023 - Jan 2024', 'Start and end months render in filter bar'); + assert.dom(CLIENT_COUNT.dateRange.dateDisplay('start')).hasText('July 2023', 'Start month renders'); + assert.dom(CLIENT_COUNT.dateRange.dateDisplay('end')).hasText('January 2024', 'End month renders'); }); test('it should render no data empty state', async function (assert) { @@ -123,31 +95,41 @@ module('Integration | Component | clients | Page::Counts', function (hooks) { }); test('it should send correct values on start and end date change', async function (assert) { - assert.expect(4); + assert.expect(3); + const jan23start = getUnixTime(new Date('2023-01-01T00:00:00Z')); + const dec23end = getUnixTime(new Date('2023-12-31T00:00:00Z')); + const jan24end = getUnixTime(new Date('2024-01-31T00:00:00Z')); - let expected = { start_time: getUnixTime(new Date('2023-01-01T00:00:00Z')), end_time: END_TIME }; + const expected = { start_time: START_TIME, end_time: END_TIME }; this.onFilterChange = (params) => { assert.deepEqual(params, expected, 'Correct values sent on filter change'); - this.startTimestamp = params.start_time || START_TIME; - this.endTimestamp = params.end_time || END_TIME; + this.set('startTimestamp', params.start_time || START_TIME); + this.set('endTimestamp', params.end_time || END_TIME); }; - + // page starts with default billing dates, which are july 23 - jan 24 await this.renderComponent(); - await dateDropdownSelect('January', '2023'); - - expected.start_time = END_TIME; - await click(CLIENT_COUNT.calendarWidget.trigger); - await click(CLIENT_COUNT.calendarWidget.currentMonth); - expected.start_time = getUnixTime(this.config.billingStartTimestamp); - await click(CLIENT_COUNT.calendarWidget.trigger); - await click(CLIENT_COUNT.calendarWidget.currentBillingPeriod); - - expected = { end_time: getUnixTime(new Date('2023-12-31T00:00:00Z')) }; - await click(CLIENT_COUNT.calendarWidget.trigger); - await click(CLIENT_COUNT.calendarWidget.customEndMonth); - await click(CLIENT_COUNT.calendarWidget.previousYear); - await click(CLIENT_COUNT.calendarWidget.calendarMonth('December')); + // First, change only the start date + expected.start_time = jan23start; + // the end date which is first set to STATIC_NOW gets recalculated + // to the end of given month/year on date range change + expected.end_time = jan24end; + await click(CLIENT_COUNT.dateRange.edit); + await fillIn(CLIENT_COUNT.dateRange.editDate('start'), '2023-01'); + await click(GENERAL.saveButton); + + // Then change only the end date + expected.end_time = dec23end; + await click(CLIENT_COUNT.dateRange.edit); + await fillIn(CLIENT_COUNT.dateRange.editDate('end'), '2023-12'); + await click(GENERAL.saveButton); + + // Then reset to billing which should reset the params + expected.start_time = undefined; + expected.end_time = undefined; + await click(CLIENT_COUNT.dateRange.edit); + await click(CLIENT_COUNT.dateRange.reset); + await click(GENERAL.saveButton); }); test('it should render namespace and auth mount filters', async function (assert) { @@ -254,9 +236,7 @@ module('Integration | Component | clients | Page::Counts', function (hooks) { await this.renderComponent(); 
assert.dom(GENERAL.emptyStateTitle).hasText('No start date found', 'Empty state renders'); - assert - .dom(CLIENT_COUNT.counts.startDropdown) - .exists('Date dropdown renders when start time is not provided'); + assert.dom(CLIENT_COUNT.dateRange.set).exists(); }); test('it should render catch all empty state', async function (assert) { diff --git a/ui/tests/integration/components/date-dropdown-test.js b/ui/tests/integration/components/date-dropdown-test.js deleted file mode 100644 index 56a5654754aa..000000000000 --- a/ui/tests/integration/components/date-dropdown-test.js +++ /dev/null @@ -1,165 +0,0 @@ -/** - * Copyright (c) HashiCorp, Inc. - * SPDX-License-Identifier: BUSL-1.1 - */ - -import { module, test } from 'qunit'; -import sinon from 'sinon'; -import { setupRenderingTest } from 'ember-qunit'; -import { click, render } from '@ember/test-helpers'; -import { hbs } from 'ember-cli-htmlbars'; -import { ARRAY_OF_MONTHS } from 'core/utils/date-formatters'; -import timestamp from 'core/utils/timestamp'; - -const SELECTORS = { - monthDropdown: '[data-test-toggle-month]', - specificMonth: (m) => `[data-test-dropdown-month="${m}"]`, - yearDropdown: '[data-test-toggle-year]', - specificYear: (y) => `[data-test-dropdown-year="${y}"]`, - submitButton: '[data-test-date-dropdown-submit]', - monthOptions: '[data-test-dropdown-month]', -}; - -module('Integration | Component | date-dropdown', function (hooks) { - setupRenderingTest(hooks); - - hooks.beforeEach(function () { - sinon.replace(timestamp, 'now', sinon.fake.returns(new Date('2018-04-03T14:15:30'))); - }); - - test('it renders dropdown', async function (assert) { - await render(hbs` -
- -
- `); - assert.dom(SELECTORS.submitButton).hasText('Submit', 'button renders default text'); - }); - - test('it renders dropdown and selects month and year', async function (assert) { - assert.expect(26); - const parentAction = (args) => { - assert.propEqual( - args, - { - dateType: 'start', - monthIdx: 1, - monthName: 'February', - year: 2016, - }, - 'sends correct args to parent' - ); - }; - this.set('parentAction', parentAction); - - await render(hbs` -
- -
- `); - assert.dom(SELECTORS.submitButton).isDisabled('button is disabled when no month or year selected'); - - await click(SELECTORS.monthDropdown); - - assert.dom(SELECTORS.monthOptions).exists({ count: 12 }, 'dropdown has 12 months'); - ARRAY_OF_MONTHS.forEach((month) => { - assert.dom(SELECTORS.specificMonth(month)).hasText(`${month}`, `dropdown includes ${month}`); - }); - - await click(SELECTORS.specificMonth('February')); - assert.dom(SELECTORS.monthDropdown).hasText('February', 'dropdown shows selected month'); - assert.dom('.ember-basic-dropdown-content').doesNotExist('dropdown closes after selecting month'); - - await click(SELECTORS.yearDropdown); - - assert.dom('[data-test-dropdown-year]').exists({ count: 5 }, 'dropdown has 5 years'); - for (const year of [2018, 2017, 2016, 2015, 2014]) { - assert.dom(SELECTORS.specificYear(year)).exists(); - } - - await click('[data-test-dropdown-year="2016"]'); - assert.dom(SELECTORS.yearDropdown).hasText(`2016`, `dropdown shows selected year`); - assert.dom('.ember-basic-dropdown-content').doesNotExist('dropdown closes after selecting year'); - assert.dom(SELECTORS.submitButton).isNotDisabled('button enabled when month and year selected'); - - await click(SELECTORS.submitButton); - }); - - test('selecting month first: current year enabled when current month selected', async function (assert) { - assert.expect(5); - await render(hbs` -
- -
- `); - // select current month - await click(SELECTORS.monthDropdown); - await click(SELECTORS.specificMonth('January')); - await click(SELECTORS.yearDropdown); - // all years should be selectable - for (const year of [2018, 2017, 2016, 2015, 2014]) { - assert.dom(SELECTORS.specificYear(year)).isNotDisabled(`year ${year} is selectable`); - } - }); - - test('selecting month first: it disables current year when future months selected', async function (assert) { - assert.expect(5); - await render(hbs` -
- -
- `); - - // select future month - await click(SELECTORS.monthDropdown); - await click(SELECTORS.specificMonth('June')); - await click(SELECTORS.yearDropdown); - - assert.dom(SELECTORS.specificYear(2018)).isDisabled(`current year is disabled`); - // previous years should be selectable - for (const year of [2017, 2016, 2015, 2014]) { - assert.dom(SELECTORS.specificYear(year)).isNotDisabled(`year ${year} is selectable`); - } - }); - - test('selecting year first: it disables future months when current year selected', async function (assert) { - assert.expect(12); - await render(hbs` -
- -
- `); - await click(SELECTORS.yearDropdown); - await click(SELECTORS.specificYear(2018)); - await click(SELECTORS.monthDropdown); - - const expectedSelectable = ['January', 'February', 'March', 'April']; - ARRAY_OF_MONTHS.forEach((month) => { - if (expectedSelectable.includes(month)) { - assert.dom(SELECTORS.specificMonth(month)).isNotDisabled(`${month} is selectable for current year`); - } else { - assert.dom(SELECTORS.specificMonth(month)).isDisabled(`${month} is disabled for current year`); - } - }); - }); - - test('selecting year first: it enables all months when past year is selected', async function (assert) { - assert.expect(12); - await render(hbs` -
- -
- `); - - await click(SELECTORS.yearDropdown); - await click(SELECTORS.specificYear(2017)); - await click(SELECTORS.monthDropdown); - - ARRAY_OF_MONTHS.forEach((month) => { - assert.dom(SELECTORS.specificMonth(month)).isNotDisabled(`${month} is selectable for previous year`); - }); - }); -}); diff --git a/ui/tests/integration/components/kv/page/kv-page-metadata-edit-test.js b/ui/tests/integration/components/kv/page/kv-page-metadata-edit-test.js index 8a827d1519d9..6cef2ea9f337 100644 --- a/ui/tests/integration/components/kv/page/kv-page-metadata-edit-test.js +++ b/ui/tests/integration/components/kv/page/kv-page-metadata-edit-test.js @@ -44,7 +44,7 @@ module('Integration | Component | kv | Page::Secret::Metadata::Edit', function ( { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.metadataModelCreate.backend, route: 'list' }, { label: this.metadataModelCreate.path, route: 'secret.details', model: this.metadataModelCreate.path }, - { label: 'metadata' }, + { label: 'Metadata' }, ]; await render( hbs` @@ -74,7 +74,7 @@ module('Integration | Component | kv | Page::Secret::Metadata::Edit', function ( { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.metadataModelEdit.backend, route: 'list' }, { label: this.metadataModelEdit.path, route: 'secret.details', model: this.metadataModelEdit.path }, - { label: 'metadata' }, + { label: 'Metadata' }, ]; await render( hbs` @@ -131,7 +131,7 @@ module('Integration | Component | kv | Page::Secret::Metadata::Edit', function ( { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.metadataModelEdit.backend, route: 'list' }, { label: this.metadataModelEdit.path, route: 'secret.details', model: this.metadataModelEdit.path }, - { label: 'metadata' }, + { label: 'Metadata' }, ]; await render( hbs` @@ -162,7 +162,7 @@ module('Integration | Component | kv | Page::Secret::Metadata::Edit', function ( { label: 'Secrets', route: 'secrets', linkExternal: true }, { label: this.metadataModelEdit.backend, route: 'list' }, { label: this.metadataModelEdit.path, route: 'secret.details', model: this.metadataModelEdit.path }, - { label: 'metadata' }, + { label: 'Metadata' }, ]; await render( hbs` diff --git a/ui/tests/integration/components/kv/page/kv-page-version-diff-test.js b/ui/tests/integration/components/kv/page/kv-page-version-diff-test.js index 31017a635e68..981a2ff1993f 100644 --- a/ui/tests/integration/components/kv/page/kv-page-version-diff-test.js +++ b/ui/tests/integration/components/kv/page/kv-page-version-diff-test.js @@ -21,7 +21,7 @@ module('Integration | Component | kv | Page::Secret::Metadata::VersionDiff', fun hooks.beforeEach(async function () { this.backend = 'kv-engine'; this.path = 'my-secret'; - this.breadcrumbs = [{ label: 'version history', route: 'secret.metadata.versions' }, { label: 'diff' }]; + this.breadcrumbs = [{ label: 'Version History', route: 'secret.metadata.versions' }, { label: 'Diff' }]; this.store = this.owner.lookup('service:store'); this.server.post('/sys/capabilities-self', allowAllCapabilitiesStub()); diff --git a/ui/tests/integration/components/kv/page/kv-page-version-history-test.js b/ui/tests/integration/components/kv/page/kv-page-version-history-test.js index 0bfd727b411f..2ccf91c86143 100644 --- a/ui/tests/integration/components/kv/page/kv-page-version-history-test.js +++ b/ui/tests/integration/components/kv/page/kv-page-version-history-test.js @@ -39,7 +39,7 @@ module('Integration | Component | kv | Page::Secret::Metadata::Version-History', { label: 'Secrets', 
route: 'secrets', linkExternal: true }, { label: this.metadata.backend, route: 'list' }, { label: this.metadata.path, route: 'secret.details', model: this.metadata.path }, - { label: 'version history' }, + { label: 'Version History' }, ]; }); diff --git a/ui/tests/integration/components/ldap/page/library/create-and-edit-test.js b/ui/tests/integration/components/ldap/page/library/create-and-edit-test.js index 28e1e5af0d50..5a852b953aa2 100644 --- a/ui/tests/integration/components/ldap/page/library/create-and-edit-test.js +++ b/ui/tests/integration/components/ldap/page/library/create-and-edit-test.js @@ -37,8 +37,8 @@ module('Integration | Component | ldap | Page::Library::CreateAndEdit', function this.breadcrumbs = [ { label: 'ldap', route: 'overview' }, - { label: 'libraries', route: 'libraries' }, - { label: 'create' }, + { label: 'Libraries', route: 'libraries' }, + { label: 'Create' }, ]; this.renderComponent = () => { diff --git a/ui/tests/integration/components/ldap/page/role/create-and-edit-test.js b/ui/tests/integration/components/ldap/page/role/create-and-edit-test.js index 1e4620339a46..3391147f2559 100644 --- a/ui/tests/integration/components/ldap/page/role/create-and-edit-test.js +++ b/ui/tests/integration/components/ldap/page/role/create-and-edit-test.js @@ -40,8 +40,8 @@ module('Integration | Component | ldap | Page::Role::CreateAndEdit', function (h this.breadcrumbs = [ { label: 'ldap', route: 'overview' }, - { label: 'roles', route: 'roles' }, - { label: 'create' }, + { label: 'Roles', route: 'roles' }, + { label: 'Create' }, ]; this.renderComponent = () => { diff --git a/ui/tests/integration/components/oidc/key-form-test.js b/ui/tests/integration/components/oidc/key-form-test.js index 47b52f1763ce..55b5c297d88d 100644 --- a/ui/tests/integration/components/oidc/key-form-test.js +++ b/ui/tests/integration/components/oidc/key-form-test.js @@ -34,7 +34,7 @@ module('Integration | Component | oidc/key-form', function (hooks) { }); test('it should save new key', async function (assert) { - assert.expect(9); + assert.expect(8); this.server.post('/identity/oidc/key/test-key', (schema, req) => { assert.ok(true, 'Request made to save key'); return JSON.parse(req.requestBody); @@ -49,7 +49,6 @@ module('Integration | Component | oidc/key-form', function (hooks) { /> `); - assert.dom('[data-test-oidc-key-title]').hasText('Create Key', 'Form title renders correct text'); assert.dom(SELECTORS.keySaveButton).hasText('Create', 'Save button has correct text'); assert.dom('[data-test-input="algorithm"]').hasValue('RS256', 'default algorithm is correct'); assert.strictEqual(findAll('[data-test-field]').length, 4, 'renders all input fields'); @@ -70,7 +69,7 @@ module('Integration | Component | oidc/key-form', function (hooks) { }); test('it should update key and limit access to selected applications', async function (assert) { - assert.expect(12); + assert.expect(11); this.server.post('/identity/oidc/key/test-key', (schema, req) => { assert.ok(true, 'Request made to update key'); @@ -94,7 +93,6 @@ module('Integration | Component | oidc/key-form', function (hooks) { /> `); - assert.dom('[data-test-oidc-key-title]').hasText('Edit Key', 'Title renders correct text'); assert.dom(SELECTORS.keySaveButton).hasText('Update', 'Save button has correct text'); assert.dom('[data-test-input="name"]').isDisabled('Name input is disabled when editing'); assert.dom('[data-test-input="name"]').hasValue('test-key', 'Name input is populated with model value'); diff --git a/ui/yarn.lock b/ui/yarn.lock index 
8d0fac332c8b..69aac92e1ea1 100644 --- a/ui/yarn.lock +++ b/ui/yarn.lock @@ -16,8 +16,8 @@ __metadata: linkType: hard "@babel/cli@npm:^7.24.6": - version: 7.24.7 - resolution: "@babel/cli@npm:7.24.7" + version: 7.24.8 + resolution: "@babel/cli@npm:7.24.8" dependencies: "@jridgewell/trace-mapping": ^0.3.25 "@nicolo-ribaudo/chokidar-2": 2.1.8-no-fsevents.3 @@ -38,7 +38,7 @@ __metadata: bin: babel: ./bin/babel.js babel-external-helpers: ./bin/babel-external-helpers.js - checksum: 40dfde8062de913dc5bb1c65a4d4e88ec2c438f16387c5552b1f8b0524f8af454c3b7bf12364ca0da8509c5edafdabc1527a939587678dc7825659c38d357c1d + checksum: 8a1fb83d0c2959b6a83cccab55ac1b0ffd408e1959369609071dadb38c1dc99a501d58751b6e4f0c43b751e595e9868856433b01832a19f592f004dd854a8c1f languageName: node linkType: hard @@ -52,39 +52,39 @@ __metadata: languageName: node linkType: hard -"@babel/compat-data@npm:^7.20.5, @babel/compat-data@npm:^7.22.6, @babel/compat-data@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/compat-data@npm:7.24.7" - checksum: 1fc276825dd434fe044877367dfac84171328e75a8483a6976aa28bf833b32367e90ee6df25bdd97c287d1aa8019757adcccac9153de70b1932c0d243a978ae9 +"@babel/compat-data@npm:^7.20.5, @babel/compat-data@npm:^7.22.6, @babel/compat-data@npm:^7.24.8": + version: 7.24.9 + resolution: "@babel/compat-data@npm:7.24.9" + checksum: 3590be0f7028bca0565a83f66752c0f0283b818e9e1bb7fc12912822768e379a6ff84c59d77dc64ba62c140b8500a3828d95c0ce013cd62d254a179bae38709b languageName: node linkType: hard "@babel/core@npm:^7.0.0, @babel/core@npm:^7.12.0, @babel/core@npm:^7.13.10, @babel/core@npm:^7.16.10, @babel/core@npm:^7.16.7, @babel/core@npm:^7.21.4, @babel/core@npm:^7.22.20, @babel/core@npm:^7.23.2, @babel/core@npm:^7.23.6, @babel/core@npm:^7.24.5, @babel/core@npm:^7.3.4": - version: 7.24.7 - resolution: "@babel/core@npm:7.24.7" + version: 7.24.9 + resolution: "@babel/core@npm:7.24.9" dependencies: "@ampproject/remapping": ^2.2.0 "@babel/code-frame": ^7.24.7 - "@babel/generator": ^7.24.7 - "@babel/helper-compilation-targets": ^7.24.7 - "@babel/helper-module-transforms": ^7.24.7 - "@babel/helpers": ^7.24.7 - "@babel/parser": ^7.24.7 + "@babel/generator": ^7.24.9 + "@babel/helper-compilation-targets": ^7.24.8 + "@babel/helper-module-transforms": ^7.24.9 + "@babel/helpers": ^7.24.8 + "@babel/parser": ^7.24.8 "@babel/template": ^7.24.7 - "@babel/traverse": ^7.24.7 - "@babel/types": ^7.24.7 + "@babel/traverse": ^7.24.8 + "@babel/types": ^7.24.9 convert-source-map: ^2.0.0 debug: ^4.1.0 gensync: ^1.0.0-beta.2 json5: ^2.2.3 semver: ^6.3.1 - checksum: 017497e2a1b4683a885219eef7d2aee83c1c0cf353506b2e180b73540ec28841d8ef1ea1837fa69f8c561574b24ddd72f04764b27b87afedfe0a07299ccef24d + checksum: eae273bee154d6a059e742a2bb7a58b03438a1f70d7909887a28258b29556dc99bcd5cbd41f13cd4755a20b0baf5e82731acb1d3690e02b7a9300fb6d1950e2c languageName: node linkType: hard "@babel/eslint-parser@npm:^7.22.15": - version: 7.24.7 - resolution: "@babel/eslint-parser@npm:7.24.7" + version: 7.24.8 + resolution: "@babel/eslint-parser@npm:7.24.8" dependencies: "@nicolo-ribaudo/eslint-scope-5-internals": 5.1.1-v1 eslint-visitor-keys: ^2.1.0 @@ -92,19 +92,19 @@ __metadata: peerDependencies: "@babel/core": ^7.11.0 eslint: ^7.5.0 || ^8.0.0 || ^9.0.0 - checksum: 0e08ccecfe48cf9dacd96fb46747014b9c3683882ae6886a17a666533f0d5e99b61e31e3992ffee0efc67d805ae8be9b2a6342ce5d66a36de8d99d88c9a244a0 + checksum: 4ca8845b6b068185af1c5b28217a005f370887cf8489983263bc7aebcc2290774a37ad9b971b78fbc3eca6a3d812306153f892b37525c3fc6be43e79c446d39e languageName: node linkType: hard 
-"@babel/generator@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/generator@npm:7.24.7" +"@babel/generator@npm:^7.24.8, @babel/generator@npm:^7.24.9": + version: 7.24.10 + resolution: "@babel/generator@npm:7.24.10" dependencies: - "@babel/types": ^7.24.7 + "@babel/types": ^7.24.9 "@jridgewell/gen-mapping": ^0.3.5 "@jridgewell/trace-mapping": ^0.3.25 jsesc: ^2.5.1 - checksum: 0ff31a73b15429f1287e4d57b439bba4a266f8c673bb445fe313b82f6d110f586776997eb723a777cd7adad9d340edd162aea4973a90112c5d0cfcaf6686844b + checksum: eb13806e9eb76932ea5205502a85ea650a991c7a6f757fbe859176f6d9b34b3da5a2c1f52a2c24fdbe0045a90438fe6889077e338cdd6c727619dee925af1ba6 languageName: node linkType: hard @@ -127,27 +127,27 @@ __metadata: languageName: node linkType: hard -"@babel/helper-compilation-targets@npm:^7.12.0, @babel/helper-compilation-targets@npm:^7.20.7, @babel/helper-compilation-targets@npm:^7.22.6, @babel/helper-compilation-targets@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/helper-compilation-targets@npm:7.24.7" +"@babel/helper-compilation-targets@npm:^7.12.0, @babel/helper-compilation-targets@npm:^7.20.7, @babel/helper-compilation-targets@npm:^7.22.6, @babel/helper-compilation-targets@npm:^7.24.7, @babel/helper-compilation-targets@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/helper-compilation-targets@npm:7.24.8" dependencies: - "@babel/compat-data": ^7.24.7 - "@babel/helper-validator-option": ^7.24.7 - browserslist: ^4.22.2 + "@babel/compat-data": ^7.24.8 + "@babel/helper-validator-option": ^7.24.8 + browserslist: ^4.23.1 lru-cache: ^5.1.1 semver: ^6.3.1 - checksum: dfc88bc35e223ade796c7267901728217c665adc5bc2e158f7b0ae850de14f1b7941bec4fe5950ae46236023cfbdeddd9c747c276acf9b39ca31f8dd97dc6cc6 + checksum: 40c9e87212fffccca387504b259a629615d7df10fc9080c113da6c51095d3e8b622a1409d9ed09faf2191628449ea28d582179c5148e2e993a3140234076b8da languageName: node linkType: hard -"@babel/helper-create-class-features-plugin@npm:^7.18.6, @babel/helper-create-class-features-plugin@npm:^7.21.0, @babel/helper-create-class-features-plugin@npm:^7.24.7, @babel/helper-create-class-features-plugin@npm:^7.5.5": - version: 7.24.7 - resolution: "@babel/helper-create-class-features-plugin@npm:7.24.7" +"@babel/helper-create-class-features-plugin@npm:^7.18.6, @babel/helper-create-class-features-plugin@npm:^7.21.0, @babel/helper-create-class-features-plugin@npm:^7.24.7, @babel/helper-create-class-features-plugin@npm:^7.24.8, @babel/helper-create-class-features-plugin@npm:^7.5.5": + version: 7.24.8 + resolution: "@babel/helper-create-class-features-plugin@npm:7.24.8" dependencies: "@babel/helper-annotate-as-pure": ^7.24.7 "@babel/helper-environment-visitor": ^7.24.7 "@babel/helper-function-name": ^7.24.7 - "@babel/helper-member-expression-to-functions": ^7.24.7 + "@babel/helper-member-expression-to-functions": ^7.24.8 "@babel/helper-optimise-call-expression": ^7.24.7 "@babel/helper-replace-supers": ^7.24.7 "@babel/helper-skip-transparent-expression-wrappers": ^7.24.7 @@ -155,7 +155,7 @@ __metadata: semver: ^6.3.1 peerDependencies: "@babel/core": ^7.0.0 - checksum: 371a181a1717a9b0cebc97727c8ea9ca6afa34029476a684b6030f9d1ad94dcdafd7de175da10b63ae3ba79e4e82404db8ed968ebf264b768f097e5d64faab71 + checksum: b4707e2c4a2cb504d7656168d887bf653db6fbe8ece4502e28e5798f2ec624dc606f2d6bc4820d31b4dc1b80f7d83d98db83516dda321a76c075e5f531abed0b languageName: node linkType: hard @@ -215,13 +215,13 @@ __metadata: languageName: node linkType: hard -"@babel/helper-member-expression-to-functions@npm:^7.24.7": - version: 7.24.7 - 
resolution: "@babel/helper-member-expression-to-functions@npm:7.24.7" +"@babel/helper-member-expression-to-functions@npm:^7.24.7, @babel/helper-member-expression-to-functions@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/helper-member-expression-to-functions@npm:7.24.8" dependencies: - "@babel/traverse": ^7.24.7 - "@babel/types": ^7.24.7 - checksum: 9fecf412f85fa23b7cf55d19eb69de39f8240426a028b141c9df2aed8cfedf20b3ec3318d40312eb7a3dec9eea792828ce0d590e0ff62da3da532482f537192c + "@babel/traverse": ^7.24.8 + "@babel/types": ^7.24.8 + checksum: bf923d05d81b06857f4ca4fe9c528c9c447a58db5ea39595bb559eae2fce01a8266173db0fd6a2ec129d7bbbb9bb22f4e90008252f7c66b422c76630a878a4bc languageName: node linkType: hard @@ -235,9 +235,9 @@ __metadata: languageName: node linkType: hard -"@babel/helper-module-transforms@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/helper-module-transforms@npm:7.24.7" +"@babel/helper-module-transforms@npm:^7.24.7, @babel/helper-module-transforms@npm:^7.24.8, @babel/helper-module-transforms@npm:^7.24.9": + version: 7.24.9 + resolution: "@babel/helper-module-transforms@npm:7.24.9" dependencies: "@babel/helper-environment-visitor": ^7.24.7 "@babel/helper-module-imports": ^7.24.7 @@ -246,7 +246,7 @@ __metadata: "@babel/helper-validator-identifier": ^7.24.7 peerDependencies: "@babel/core": ^7.0.0 - checksum: ddff3b41c2667876b4e4e73d961168f48a5ec9560c95c8c2d109e6221f9ca36c6f90c6317eb7a47f2a3c99419c356e529a86b79174cad0d4f7a61960866b88ca + checksum: ffcf11b678a8d3e6a243285cb5262c37f4d47d507653420c1f7f0bd27076e88177f2b7158850d1a470fcfe923426a2e6571c554c455a90c9755ff488ac36ac40 languageName: node linkType: hard @@ -259,10 +259,10 @@ __metadata: languageName: node linkType: hard -"@babel/helper-plugin-utils@npm:^7.0.0, @babel/helper-plugin-utils@npm:^7.10.4, @babel/helper-plugin-utils@npm:^7.12.13, @babel/helper-plugin-utils@npm:^7.14.5, @babel/helper-plugin-utils@npm:^7.18.6, @babel/helper-plugin-utils@npm:^7.20.2, @babel/helper-plugin-utils@npm:^7.22.5, @babel/helper-plugin-utils@npm:^7.24.7, @babel/helper-plugin-utils@npm:^7.8.0, @babel/helper-plugin-utils@npm:^7.8.3": - version: 7.24.7 - resolution: "@babel/helper-plugin-utils@npm:7.24.7" - checksum: 81f2a15751d892e4a8fce25390f973363a5b27596167861d2d6eab0f61856eb2ba389b031a9f19f669c0bd4dd601185828d3cebafd25431be7a1696f2ce3ef68 +"@babel/helper-plugin-utils@npm:^7.0.0, @babel/helper-plugin-utils@npm:^7.10.4, @babel/helper-plugin-utils@npm:^7.12.13, @babel/helper-plugin-utils@npm:^7.14.5, @babel/helper-plugin-utils@npm:^7.18.6, @babel/helper-plugin-utils@npm:^7.20.2, @babel/helper-plugin-utils@npm:^7.22.5, @babel/helper-plugin-utils@npm:^7.24.7, @babel/helper-plugin-utils@npm:^7.24.8, @babel/helper-plugin-utils@npm:^7.8.0, @babel/helper-plugin-utils@npm:^7.8.3": + version: 7.24.8 + resolution: "@babel/helper-plugin-utils@npm:7.24.8" + checksum: 73b1a83ba8bcee21dc94de2eb7323207391715e4369fd55844bb15cf13e3df6f3d13a40786d990e6370bf0f571d94fc31f70dec96c1d1002058258c35ca3767a languageName: node linkType: hard @@ -321,10 +321,10 @@ __metadata: languageName: node linkType: hard -"@babel/helper-string-parser@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/helper-string-parser@npm:7.24.7" - checksum: 09568193044a578743dd44bf7397940c27ea693f9812d24acb700890636b376847a611cdd0393a928544e79d7ad5b8b916bd8e6e772bc8a10c48a647a96e7b1a +"@babel/helper-string-parser@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/helper-string-parser@npm:7.24.8" + checksum: 
39b03c5119216883878655b149148dc4d2e284791e969b19467a9411fccaa33f7a713add98f4db5ed519535f70ad273cdadfd2eb54d47ebbdeac5083351328ce languageName: node linkType: hard @@ -335,10 +335,10 @@ __metadata: languageName: node linkType: hard -"@babel/helper-validator-option@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/helper-validator-option@npm:7.24.7" - checksum: 9689166bf3f777dd424c026841c8cd651e41b21242dbfd4569a53086179a3e744c8eddd56e9d10b54142270141c91581b53af0d7c00c82d552d2540e2a919f7e +"@babel/helper-validator-option@npm:^7.24.7, @babel/helper-validator-option@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/helper-validator-option@npm:7.24.8" + checksum: a52442dfa74be6719c0608fee3225bd0493c4057459f3014681ea1a4643cd38b68ff477fe867c4b356da7330d085f247f0724d300582fa4ab9a02efaf34d107c languageName: node linkType: hard @@ -354,13 +354,13 @@ __metadata: languageName: node linkType: hard -"@babel/helpers@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/helpers@npm:7.24.7" +"@babel/helpers@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/helpers@npm:7.24.8" dependencies: "@babel/template": ^7.24.7 - "@babel/types": ^7.24.7 - checksum: 934da58098a3670ca7f9f42425b9c44d0ca4f8fad815c0f51d89fc7b64c5e0b4c7d5fec038599de691229ada737edeaf72fad3eba8e16dd5842e8ea447f76b66 + "@babel/types": ^7.24.8 + checksum: 2d7301b1b9c91e518c4766bae171230e243d98461c15eabbd44f8f9c83c297fad5c4a64ad80cfec9ca8e90412fc2b41ee86d7eb35dc8a7611c268bcf1317fe46 languageName: node linkType: hard @@ -376,12 +376,12 @@ __metadata: languageName: node linkType: hard -"@babel/parser@npm:^7.20.15, @babel/parser@npm:^7.24.7, @babel/parser@npm:^7.4.5": - version: 7.24.7 - resolution: "@babel/parser@npm:7.24.7" +"@babel/parser@npm:^7.20.15, @babel/parser@npm:^7.24.7, @babel/parser@npm:^7.24.8, @babel/parser@npm:^7.4.5": + version: 7.24.8 + resolution: "@babel/parser@npm:7.24.8" bin: parser: ./bin/babel-parser.js - checksum: fc9d2c4c8712f89672edc55c0dc5cf640dcec715b56480f111f85c2bc1d507e251596e4110d65796690a96ac37a4b60432af90b3e97bb47e69d4ef83872dbbd6 + checksum: 76f866333bfbd53800ac027419ae523bb0137fc63daa968232eb780e4390136bb6e497cb4a2cf6051a2c318aa335c2e6d2adc17079d60691ae7bde89b28c5688 languageName: node linkType: hard @@ -825,21 +825,21 @@ __metadata: languageName: node linkType: hard -"@babel/plugin-transform-classes@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/plugin-transform-classes@npm:7.24.7" +"@babel/plugin-transform-classes@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/plugin-transform-classes@npm:7.24.8" dependencies: "@babel/helper-annotate-as-pure": ^7.24.7 - "@babel/helper-compilation-targets": ^7.24.7 + "@babel/helper-compilation-targets": ^7.24.8 "@babel/helper-environment-visitor": ^7.24.7 "@babel/helper-function-name": ^7.24.7 - "@babel/helper-plugin-utils": ^7.24.7 + "@babel/helper-plugin-utils": ^7.24.8 "@babel/helper-replace-supers": ^7.24.7 "@babel/helper-split-export-declaration": ^7.24.7 globals: ^11.1.0 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: f01cb31143730d425681e9816020cbb519c7ddb3b6ca308dfaf2821eda5699a746637fc6bf19811e2fb42cfdf8b00a21b31c754da83771a5c280077925677354 + checksum: 9c0f547d67e255b37055461df9c1a578c29bf59c7055bd5b40b07b92e5448af3ca8d853d50056125b7dae9bfe3a4cf1559d61b9ccbc3d2578dd43f15386f12fe languageName: node linkType: hard @@ -855,14 +855,14 @@ __metadata: languageName: node linkType: hard -"@babel/plugin-transform-destructuring@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/plugin-transform-destructuring@npm:7.24.7" 
+"@babel/plugin-transform-destructuring@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/plugin-transform-destructuring@npm:7.24.8" dependencies: - "@babel/helper-plugin-utils": ^7.24.7 + "@babel/helper-plugin-utils": ^7.24.8 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: b9637b27faf9d24a8119bc5a1f98a2f47c69e6441bd8fc71163500be316253a72173308a93122bcf27d8d314ace43344c976f7291cf6376767f408350c8149d4 + checksum: 0b4bd3d608979a1e5bd97d9d42acd5ad405c7fffa61efac4c7afd8e86ea6c2d91ab2d94b6a98d63919571363fe76e0b03c4ff161f0f60241b895842596e4a999 languageName: node linkType: hard @@ -1008,16 +1008,16 @@ __metadata: languageName: node linkType: hard -"@babel/plugin-transform-modules-commonjs@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/plugin-transform-modules-commonjs@npm:7.24.7" +"@babel/plugin-transform-modules-commonjs@npm:^7.24.7, @babel/plugin-transform-modules-commonjs@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/plugin-transform-modules-commonjs@npm:7.24.8" dependencies: - "@babel/helper-module-transforms": ^7.24.7 - "@babel/helper-plugin-utils": ^7.24.7 + "@babel/helper-module-transforms": ^7.24.8 + "@babel/helper-plugin-utils": ^7.24.8 "@babel/helper-simple-access": ^7.24.7 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: bfda2a0297197ed342e2a02e5f9847a489a3ae40a4a7d7f00f4aeb8544a85e9006e0c5271c8f61f39bc97975ef2717b5594cf9486694377a53433162909d64c1 + checksum: a4cf95b1639c33382064b44558f73ee5fac023f2a94d16e549d2bb55ceebd5cbc10fcddd505d08cd5bc97f5a64af9fd155512358b7dcf7b1a0082e8945cf21c5 languageName: node linkType: hard @@ -1132,16 +1132,16 @@ __metadata: languageName: node linkType: hard -"@babel/plugin-transform-optional-chaining@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/plugin-transform-optional-chaining@npm:7.24.7" +"@babel/plugin-transform-optional-chaining@npm:^7.24.7, @babel/plugin-transform-optional-chaining@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/plugin-transform-optional-chaining@npm:7.24.8" dependencies: - "@babel/helper-plugin-utils": ^7.24.7 + "@babel/helper-plugin-utils": ^7.24.8 "@babel/helper-skip-transparent-expression-wrappers": ^7.24.7 "@babel/plugin-syntax-optional-chaining": ^7.8.3 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: 877e7ce9097d475132c7f4d1244de50bb2fd37993dc4580c735f18f8cbc49282f6e77752821bcad5ca9d3528412d2c8a7ee0aa7ca71bb680ff82648e7a5fed25 + checksum: 45e55e3a2fffb89002d3f89aef59c141610f23b60eee41e047380bffc40290b59f64fc649aa7ec5281f73d41b2065410d788acc6afaad2a9f44cad6e8af04442 languageName: node linkType: hard @@ -1277,28 +1277,28 @@ __metadata: languageName: node linkType: hard -"@babel/plugin-transform-typeof-symbol@npm:^7.24.7": - version: 7.24.7 - resolution: "@babel/plugin-transform-typeof-symbol@npm:7.24.7" +"@babel/plugin-transform-typeof-symbol@npm:^7.24.8": + version: 7.24.8 + resolution: "@babel/plugin-transform-typeof-symbol@npm:7.24.8" dependencies: - "@babel/helper-plugin-utils": ^7.24.7 + "@babel/helper-plugin-utils": ^7.24.8 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: 6bd16b9347614d44187d8f8ee23ebd7be30dabf3632eed5ff0415f35a482e827de220527089eae9cdfb75e85aa72db0e141ebc2247c4b1187c1abcdacdc34895 + checksum: 8663a8e7347cedf181001d99c88cf794b6598c3d82f324098510fe8fb8bd22113995526a77aa35a3cc5d70ffd0617a59dd0d10311a9bf0e1a3a7d3e59b900c00 languageName: node linkType: hard "@babel/plugin-transform-typescript@npm:^7.13.0, @babel/plugin-transform-typescript@npm:^7.16.8, @babel/plugin-transform-typescript@npm:^7.20.13, @babel/plugin-transform-typescript@npm:^7.24.7": - 
version: 7.24.7 - resolution: "@babel/plugin-transform-typescript@npm:7.24.7" + version: 7.24.8 + resolution: "@babel/plugin-transform-typescript@npm:7.24.8" dependencies: "@babel/helper-annotate-as-pure": ^7.24.7 - "@babel/helper-create-class-features-plugin": ^7.24.7 - "@babel/helper-plugin-utils": ^7.24.7 + "@babel/helper-create-class-features-plugin": ^7.24.8 + "@babel/helper-plugin-utils": ^7.24.8 "@babel/plugin-syntax-typescript": ^7.24.7 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: 6b367d1e3d6bdbe438878a76436fc6903e2b4fd7c31fa036d43865570d282679ec3f7c0306399851f2866a9b36686a0ea8c343df3750f70d427f1fe20ca54310 + checksum: 4dcdc0ca2b523ccfb216ad7e68d2954576e42d83956e0e65626ad1ece17da85cb1122b6c350c4746db927996060466c879945d40cde156a94019f30587fef41a languageName: node linkType: hard @@ -1385,13 +1385,13 @@ __metadata: linkType: hard "@babel/preset-env@npm:^7.16.5, @babel/preset-env@npm:^7.16.7, @babel/preset-env@npm:^7.20.2, @babel/preset-env@npm:^7.24.6": - version: 7.24.7 - resolution: "@babel/preset-env@npm:7.24.7" + version: 7.24.8 + resolution: "@babel/preset-env@npm:7.24.8" dependencies: - "@babel/compat-data": ^7.24.7 - "@babel/helper-compilation-targets": ^7.24.7 - "@babel/helper-plugin-utils": ^7.24.7 - "@babel/helper-validator-option": ^7.24.7 + "@babel/compat-data": ^7.24.8 + "@babel/helper-compilation-targets": ^7.24.8 + "@babel/helper-plugin-utils": ^7.24.8 + "@babel/helper-validator-option": ^7.24.8 "@babel/plugin-bugfix-firefox-class-in-computed-class-key": ^7.24.7 "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": ^7.24.7 "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": ^7.24.7 @@ -1422,9 +1422,9 @@ __metadata: "@babel/plugin-transform-block-scoping": ^7.24.7 "@babel/plugin-transform-class-properties": ^7.24.7 "@babel/plugin-transform-class-static-block": ^7.24.7 - "@babel/plugin-transform-classes": ^7.24.7 + "@babel/plugin-transform-classes": ^7.24.8 "@babel/plugin-transform-computed-properties": ^7.24.7 - "@babel/plugin-transform-destructuring": ^7.24.7 + "@babel/plugin-transform-destructuring": ^7.24.8 "@babel/plugin-transform-dotall-regex": ^7.24.7 "@babel/plugin-transform-duplicate-keys": ^7.24.7 "@babel/plugin-transform-dynamic-import": ^7.24.7 @@ -1437,7 +1437,7 @@ __metadata: "@babel/plugin-transform-logical-assignment-operators": ^7.24.7 "@babel/plugin-transform-member-expression-literals": ^7.24.7 "@babel/plugin-transform-modules-amd": ^7.24.7 - "@babel/plugin-transform-modules-commonjs": ^7.24.7 + "@babel/plugin-transform-modules-commonjs": ^7.24.8 "@babel/plugin-transform-modules-systemjs": ^7.24.7 "@babel/plugin-transform-modules-umd": ^7.24.7 "@babel/plugin-transform-named-capturing-groups-regex": ^7.24.7 @@ -1447,7 +1447,7 @@ __metadata: "@babel/plugin-transform-object-rest-spread": ^7.24.7 "@babel/plugin-transform-object-super": ^7.24.7 "@babel/plugin-transform-optional-catch-binding": ^7.24.7 - "@babel/plugin-transform-optional-chaining": ^7.24.7 + "@babel/plugin-transform-optional-chaining": ^7.24.8 "@babel/plugin-transform-parameters": ^7.24.7 "@babel/plugin-transform-private-methods": ^7.24.7 "@babel/plugin-transform-private-property-in-object": ^7.24.7 @@ -1458,7 +1458,7 @@ __metadata: "@babel/plugin-transform-spread": ^7.24.7 "@babel/plugin-transform-sticky-regex": ^7.24.7 "@babel/plugin-transform-template-literals": ^7.24.7 - "@babel/plugin-transform-typeof-symbol": ^7.24.7 + "@babel/plugin-transform-typeof-symbol": ^7.24.8 "@babel/plugin-transform-unicode-escapes": ^7.24.7 
"@babel/plugin-transform-unicode-property-regex": ^7.24.7 "@babel/plugin-transform-unicode-regex": ^7.24.7 @@ -1467,11 +1467,11 @@ __metadata: babel-plugin-polyfill-corejs2: ^0.4.10 babel-plugin-polyfill-corejs3: ^0.10.4 babel-plugin-polyfill-regenerator: ^0.6.1 - core-js-compat: ^3.31.0 + core-js-compat: ^3.37.1 semver: ^6.3.1 peerDependencies: "@babel/core": ^7.0.0-0 - checksum: 1a82c883c7404359b19b7436d0aab05f8dd4e89e8b1f7de127cc65d0ff6a9b1c345211d9c038f5b6e8f93d26f091fa9c73812d82851026ab4ec93f5ed0f0d675 + checksum: efea0039dbb089c9cc0b792b9ac0eef949699584b4c622e2abea062b44b1a0fbcda6ad25e2263ae36a69586889b4a22439a1096aa8152b366e3fedd921ae66ac languageName: node linkType: hard @@ -1520,11 +1520,11 @@ __metadata: linkType: hard "@babel/runtime@npm:^7.14.0, @babel/runtime@npm:^7.17.8, @babel/runtime@npm:^7.21.0, @babel/runtime@npm:^7.8.4": - version: 7.24.7 - resolution: "@babel/runtime@npm:7.24.7" + version: 7.24.8 + resolution: "@babel/runtime@npm:7.24.8" dependencies: regenerator-runtime: ^0.14.0 - checksum: d17f29eed6f848ac15cdf4202a910b741facfb0419a9d79e5c7fa37df6362fc3227f1cc2e248cc6db5e53ddffb4caa6686c488e6e80ce3d29c36a4e74c8734ea + checksum: 6b1e4230580f67a807ad054720812bbefbb024cc2adc1159d050acbb764c4c81c7ac5f7a042c48f578987c5edc2453c71039268df059058e9501fa6023d764b0 languageName: node linkType: hard @@ -1539,32 +1539,32 @@ __metadata: languageName: node linkType: hard -"@babel/traverse@npm:^7.24.7, @babel/traverse@npm:^7.4.5": - version: 7.24.7 - resolution: "@babel/traverse@npm:7.24.7" +"@babel/traverse@npm:^7.24.7, @babel/traverse@npm:^7.24.8, @babel/traverse@npm:^7.4.5": + version: 7.24.8 + resolution: "@babel/traverse@npm:7.24.8" dependencies: "@babel/code-frame": ^7.24.7 - "@babel/generator": ^7.24.7 + "@babel/generator": ^7.24.8 "@babel/helper-environment-visitor": ^7.24.7 "@babel/helper-function-name": ^7.24.7 "@babel/helper-hoist-variables": ^7.24.7 "@babel/helper-split-export-declaration": ^7.24.7 - "@babel/parser": ^7.24.7 - "@babel/types": ^7.24.7 + "@babel/parser": ^7.24.8 + "@babel/types": ^7.24.8 debug: ^4.3.1 globals: ^11.1.0 - checksum: 7cd366afe9e7ee77e493779fdf24f67bf5595247289364f4689e29688572505eaeb886d7a8f20ebb9c29fc2de7d0895e4ff9e203e78e39ac67239724d45aa83b + checksum: ee7955476ce031613249f2b0ce9e74a3b7787c9d52e84534fcf39ad61aeb0b811a4cd83edc157608be4886f04c6ecf210861e211ba2a3db4fda729cc2048b5ed languageName: node linkType: hard -"@babel/types@npm:^7.12.13, @babel/types@npm:^7.24.7, @babel/types@npm:^7.4.4, @babel/types@npm:^7.7.2, @babel/types@npm:^7.8.3": - version: 7.24.7 - resolution: "@babel/types@npm:7.24.7" +"@babel/types@npm:^7.12.13, @babel/types@npm:^7.24.7, @babel/types@npm:^7.24.8, @babel/types@npm:^7.24.9, @babel/types@npm:^7.4.4, @babel/types@npm:^7.7.2, @babel/types@npm:^7.8.3": + version: 7.24.9 + resolution: "@babel/types@npm:7.24.9" dependencies: - "@babel/helper-string-parser": ^7.24.7 + "@babel/helper-string-parser": ^7.24.8 "@babel/helper-validator-identifier": ^7.24.7 to-fast-properties: ^2.0.0 - checksum: 3e4437fced97e02982972ce5bebd318c47d42c9be2152c0fd28c6f786cc74086cc0a8fb83b602b846e41df37f22c36254338eada1a47ef9d8a1ec92332ca3ea8 + checksum: 15cb05c45be5d4c49a749575d3742bd005d0e2e850c13fb462754983a5bc1063fbc8f6566246fc064e3e8b21a5a75a37a948f1b3f27189cc90b236fee93f5e51 languageName: node linkType: hard @@ -1588,28 +1588,28 @@ __metadata: linkType: hard "@csstools/css-parser-algorithms@npm:^2.3.1": - version: 2.7.0 - resolution: "@csstools/css-parser-algorithms@npm:2.7.0" + version: 2.7.1 + resolution: 
"@csstools/css-parser-algorithms@npm:2.7.1" peerDependencies: - "@csstools/css-tokenizer": ^2.3.2 - checksum: 25f27d0b647ee2a215f27b7b41e0e3337f6df93bf8b53e6e86f25b6089dd3d8597133919c1c107b5a8c737c83176305ab7818448348036cbacae30cf70c4433c + "@csstools/css-tokenizer": ^2.4.1 + checksum: 304e6f92e583042c310e368a82b694af563a395e5c55911caefe52765c5acb000b9daa17356ea8a4dd37d4d50132b76de48ced75159b169b53e134ff78b362ba languageName: node linkType: hard "@csstools/css-tokenizer@npm:^2.2.0": - version: 2.3.2 - resolution: "@csstools/css-tokenizer@npm:2.3.2" - checksum: 40c0eaba3f46134c4b8952d25c3076a69463b55c82a5b4bf9be344b3db544c6fee2d1ddb2dd7dd0afb8a347a1903b050c29c83c981aa0f8c3f33fc795bf21e58 + version: 2.4.1 + resolution: "@csstools/css-tokenizer@npm:2.4.1" + checksum: 395c51f8724ddc4851d836f484346bb3ea6a67af936dde12cbf9a57ae321372e79dee717cbe4823599eb0e6fd2d5405cf8873450e986c2fca6e6ed82e7b10219 languageName: node linkType: hard "@csstools/media-query-list-parser@npm:^2.1.4": - version: 2.1.12 - resolution: "@csstools/media-query-list-parser@npm:2.1.12" + version: 2.1.13 + resolution: "@csstools/media-query-list-parser@npm:2.1.13" peerDependencies: - "@csstools/css-parser-algorithms": ^2.7.0 - "@csstools/css-tokenizer": ^2.3.2 - checksum: 0c2655cf247fcae3ab5ea9a38264567c5d590d0b3f7d96d33cb92253e95acab25a60d66f70c15e7bf75365fa796bf19d5387991a110dd8b38ed5b1767573e113 + "@csstools/css-parser-algorithms": ^2.7.1 + "@csstools/css-tokenizer": ^2.4.1 + checksum: 7754b4b9fcc749a51a2bcd34a167ad16e7227ff087f6c4e15b3593d3342413446b72dad37f1adb99c62538730c77e3e47842987ce453fbb3849d329a39ba9ad7 languageName: node linkType: hard @@ -2029,7 +2029,7 @@ __metadata: languageName: node linkType: hard -"@embroider/addon-shim@npm:^1.0.0, @embroider/addon-shim@npm:^1.2.0, @embroider/addon-shim@npm:^1.6.0, @embroider/addon-shim@npm:^1.8.0, @embroider/addon-shim@npm:^1.8.3, @embroider/addon-shim@npm:^1.8.4, @embroider/addon-shim@npm:^1.8.6, @embroider/addon-shim@npm:^1.8.7": +"@embroider/addon-shim@npm:^1.0.0, @embroider/addon-shim@npm:^1.2.0, @embroider/addon-shim@npm:^1.6.0, @embroider/addon-shim@npm:^1.8.0, @embroider/addon-shim@npm:^1.8.3, @embroider/addon-shim@npm:^1.8.4, @embroider/addon-shim@npm:^1.8.6, @embroider/addon-shim@npm:^1.8.7, @embroider/addon-shim@npm:^1.8.9": version: 1.8.9 resolution: "@embroider/addon-shim@npm:1.8.9" dependencies: @@ -2081,10 +2081,10 @@ __metadata: linkType: hard "@embroider/util@npm:^1.0.0, @embroider/util@npm:^1.13.1": - version: 1.13.1 - resolution: "@embroider/util@npm:1.13.1" + version: 1.13.2 + resolution: "@embroider/util@npm:1.13.2" dependencies: - "@embroider/macros": ^1.16.1 + "@embroider/macros": ^1.16.5 broccoli-funnel: ^3.0.5 ember-cli-babel: ^7.26.11 peerDependencies: @@ -2096,7 +2096,7 @@ __metadata: optional: true "@glint/template": optional: true - checksum: 62e92aa9a6c39c711077c189f947567ace1cec27b5eda17c67a341171340808dcd4fad863a4c3a7130fbeb40eb386262deec82941c6d4f2367ac3ad0a4bc78a4 + checksum: 4b851e44960120a2a19d7d68812b3a14475c874866328ff13a17258b8373936e63dcdb97aa23e913d0700e762f0d497db5deb8aa1c8f1767e7f8e393adc6db5e languageName: node linkType: hard @@ -2143,28 +2143,28 @@ __metadata: linkType: hard "@floating-ui/core@npm:^1.6.0": - version: 1.6.4 - resolution: "@floating-ui/core@npm:1.6.4" + version: 1.6.5 + resolution: "@floating-ui/core@npm:1.6.5" dependencies: - "@floating-ui/utils": ^0.2.4 - checksum: 6855472c00ceaa14e0f1cb4bd5de0de01d05cd46bdf12cb19bd6a89fa70bdfba0460a776dc50d28ab40e3bddc291e2211958497528fdd98653ea7260d61e0442 + "@floating-ui/utils": 
^0.2.5 + checksum: 8e6c62a6e9223fba9afbcaca8afe408788a2bc8ab1b2f5734a26d5b02d4017a2baffc7176a938a610fd243e6a983ada605f259b35c88813e2230dd29906a78fd languageName: node linkType: hard "@floating-ui/dom@npm:^1.6.3": - version: 1.6.7 - resolution: "@floating-ui/dom@npm:1.6.7" + version: 1.6.8 + resolution: "@floating-ui/dom@npm:1.6.8" dependencies: "@floating-ui/core": ^1.6.0 - "@floating-ui/utils": ^0.2.4 - checksum: 66605a2948bfe7532408197b4c522fecf04cf11e7839623d0dca0d22362b42d64a5db2f3be865053e9b0d44c89faf1befa9a4ce1b7fa595d1b3dc82f635d079c + "@floating-ui/utils": ^0.2.5 + checksum: bab6954bdde69afeaf8dbbf335818fe710c6eae1c62856ae1e09fa6abdc056bf5995e053638b76fa6661b8384c363ca2af874ab0448c3f6943808f4f8f77f3ea languageName: node linkType: hard -"@floating-ui/utils@npm:^0.2.4": - version: 0.2.4 - resolution: "@floating-ui/utils@npm:0.2.4" - checksum: af44cdb3f394fbee6abc933fc3c25bf22f3f0bac58150eee8cc1dcc7e9be56a19b13e438820160614a90712e5a43f84b091afa6689318a10504042930ae9cf44 +"@floating-ui/utils@npm:^0.2.5": + version: 0.2.5 + resolution: "@floating-ui/utils@npm:0.2.5" + checksum: 32834fe0fec5ee89187f8defd0b10813d725dab7dc6ed1545ded6655630bac5d438f0c991d019d675585e118846f12391236fc2886a5c73a57576e7de3eca3f9 languageName: node linkType: hard @@ -2458,9 +2458,9 @@ __metadata: languageName: node linkType: hard -"@hashicorp/design-system-components@npm:^4.1.0": - version: 4.5.3 - resolution: "@hashicorp/design-system-components@npm:4.5.3" +"@hashicorp/design-system-components@npm:^4.6.0": + version: 4.6.0 + resolution: "@hashicorp/design-system-components@npm:4.6.0" dependencies: "@ember/render-modifiers": ^2.0.5 "@ember/string": ^3.1.1 @@ -2469,7 +2469,6 @@ __metadata: "@floating-ui/dom": ^1.6.3 "@hashicorp/design-system-tokens": ^2.1.0 "@hashicorp/ember-flight-icons": ^5.1.2 - "@oddbird/popover-polyfill": ^0.4.3 decorator-transforms: ^1.1.0 ember-a11y-refocus: ^4.1.0 ember-cli-sass: ^11.0.1 @@ -2487,7 +2486,7 @@ __metadata: tippy.js: ^6.3.7 peerDependencies: ember-source: ^3.28.0 || ^4.0.0 || ^5.3.0 - checksum: 8914f05caed2e2fc484006695b5b4c2bddb46c5125ab01b0a7bace176937407fc2537870025ce8ee4fdd38c3c3ba8a91977e35309b877ef5f970c06a7105f987 + checksum: da5b83f03b1ed4d70ba79a011c51e45bd7a1688c95a589bd486ee5f31300a2a6e2d0f47a29e0ecc1f5dc39d1c389ce7ba4f3f2737bf289398a657b2199d0eb9e languageName: node linkType: hard @@ -2550,9 +2549,9 @@ __metadata: linkType: hard "@inquirer/figures@npm:^1.0.3": - version: 1.0.3 - resolution: "@inquirer/figures@npm:1.0.3" - checksum: ca83d9e2a02ed5309b3df5642d2194fde24e6f89779339c63304f2570f36f3bc431236a93db7fa412765a06f01c765974b06b1ed8b9aed881be46f2cbb67f9c7 + version: 1.0.5 + resolution: "@inquirer/figures@npm:1.0.5" + checksum: 01dc7b95fe7b030b0577d59f45c4fa5c002dccb43ac75ff106d7142825e09dee63a6f9c42b044da2bc964bf38c40229a112a26505a68f3912b15dc8304106bbc languageName: node linkType: hard @@ -2606,9 +2605,9 @@ __metadata: linkType: hard "@jridgewell/sourcemap-codec@npm:^1.4.10, @jridgewell/sourcemap-codec@npm:^1.4.14, @jridgewell/sourcemap-codec@npm:^1.4.15": - version: 1.4.15 - resolution: "@jridgewell/sourcemap-codec@npm:1.4.15" - checksum: b881c7e503db3fc7f3c1f35a1dd2655a188cc51a3612d76efc8a6eb74728bef5606e6758ee77423e564092b4a518aba569bbb21c9bac5ab7a35b0c6ae7e344c8 + version: 1.5.0 + resolution: "@jridgewell/sourcemap-codec@npm:1.5.0" + checksum: 05df4f2538b3b0f998ea4c1cd34574d0feba216fa5d4ccaef0187d12abf82eafe6021cec8b49f9bb4d90f2ba4582ccc581e72986a5fcf4176ae0cfeb04cf52ec languageName: node linkType: hard @@ -2662,9 +2661,9 @@ __metadata: linkType: hard 
"@mdn/browser-compat-data@npm:^5.2.34, @mdn/browser-compat-data@npm:^5.3.13": - version: 5.5.35 - resolution: "@mdn/browser-compat-data@npm:5.5.35" - checksum: bebf055740ad667f8078fa3245b9d2e7e7a58a70726947b52559e28c1dfab756cbca9642d3b8cb013428fb9bdb175794564e3d6e253eae2236966c78a866a5b4 + version: 5.5.40 + resolution: "@mdn/browser-compat-data@npm:5.5.40" + checksum: 58030a5b95b4d92aa695e72cfe8319e4605e985d11e52fedf8bb7469ff8496ccb4c0501bcb68ae5caeb3f69439f3422611996c6de9e4f4377dbb0b3af12fe55c languageName: node linkType: hard @@ -2786,13 +2785,6 @@ __metadata: languageName: node linkType: hard -"@oddbird/popover-polyfill@npm:^0.4.3": - version: 0.4.3 - resolution: "@oddbird/popover-polyfill@npm:0.4.3" - checksum: 9dad802ab9d199a3b30446acd77436dd4597af99918b4080f24ef8d56399c515f03521af2c3ef57a86da41ac29a173849d2f61ab83a894337d3122b3965d257b - languageName: node - linkType: hard - "@pkgjs/parseargs@npm:^0.11.0": version: 0.11.0 resolution: "@pkgjs/parseargs@npm:0.11.0" @@ -3094,13 +3086,23 @@ __metadata: languageName: node linkType: hard -"@types/eslint@npm:*, @types/eslint@npm:^8.4.2, @types/eslint@npm:^8.4.9": - version: 8.56.10 - resolution: "@types/eslint@npm:8.56.10" +"@types/eslint@npm:*": + version: 9.6.0 + resolution: "@types/eslint@npm:9.6.0" + dependencies: + "@types/estree": "*" + "@types/json-schema": "*" + checksum: 7be4b1d24f3df30b28e9cbaac6a5fa14ec1ceca7c173d9605c0ec6e0d1dcdba0452d326dd695dd980f5c14b42aa09fe41675c4f09ffc82db4f466588d3f837cb + languageName: node + linkType: hard + +"@types/eslint@npm:^8.4.2, @types/eslint@npm:^8.4.9": + version: 8.56.11 + resolution: "@types/eslint@npm:8.56.11" dependencies: "@types/estree": "*" "@types/json-schema": "*" - checksum: fb7137dd263ce1130b42d14452bdd0266ef81f52cb55ba1a5e9750e65da1f0596dc598c88bffc7e415458b6cb611a876dcc132bcf40ea48701c6d05b40c57be5 + checksum: 181a7f11bdc70523142554e4751b8571fa546f71f25fdc363298744857a01e830c9c009a61e81c1a0fd4f01a46f91d6d7098f582142fec94da8f86b94bb50b7a languageName: node linkType: hard @@ -3267,11 +3269,11 @@ __metadata: linkType: hard "@types/node@npm:*, @types/node@npm:>=10.0.0": - version: 20.14.9 - resolution: "@types/node@npm:20.14.9" + version: 20.14.11 + resolution: "@types/node@npm:20.14.11" dependencies: undici-types: ~5.26.4 - checksum: 5e9eda1ac8c6cc6bcd1063903ae195eaede9aad1bdad00408a919409cfbcdd2d6535aa3d50346f0d385528f9e03dafc7d1b3bad25aedb1dcd79a6ad39d06c35d + checksum: 24396dea2bc803c2d2ebfdd31a3e6e93818ba1a5933d63cd0f64fad1e2955a8280ba09338a48ffe68cd84748eec8bee27135045f15661aa389656f67fe0b0924 languageName: node linkType: hard @@ -3383,12 +3385,12 @@ __metadata: languageName: node linkType: hard -"@types/sinon@npm:^10.0.19": - version: 10.0.20 - resolution: "@types/sinon@npm:10.0.20" +"@types/sinon@npm:^17.0.3": + version: 17.0.3 + resolution: "@types/sinon@npm:17.0.3" dependencies: "@types/sinonjs__fake-timers": "*" - checksum: 7322771345c202b90057f8112e0d34b7339e5ae1827fb1bfe385fc9e38ed6a2f18b4c66e88d27d98c775f7f74fb1167c0c14f61ca64155786534541e6c6eb05f + checksum: c8e9956d9c90fe1ec1cc43085ae48897f93f9ea86e909ab47f255ea71f5229651faa070393950fb6923aef426c84e92b375503f9f8886ef44668b82a8ee49e9a languageName: node linkType: hard @@ -3811,11 +3813,11 @@ __metadata: linkType: hard "acorn@npm:^8.5.0, acorn@npm:^8.7.1, acorn@npm:^8.8.2, acorn@npm:^8.9.0": - version: 8.12.0 - resolution: "acorn@npm:8.12.0" + version: 8.12.1 + resolution: "acorn@npm:8.12.1" bin: acorn: bin/acorn - checksum: 
ae142de8739ef15a5d936c550c1d267fc4dedcdbe62ad1aa2c0009afed1de84dd0a584684a5d200bb55d8db14f3e09a95c6e92a5303973c04b9a7413c36d1df0 + checksum: 677880034aee5bdf7434cc2d25b641d7bedb0b5ef47868a78dadabedccf58e1c5457526d9d8249cd253f2df087e081c3fe7d903b448d8e19e5131a3065b83c07 languageName: node linkType: hard @@ -3894,14 +3896,14 @@ __metadata: linkType: hard "ajv@npm:^8.0.0, ajv@npm:^8.0.1, ajv@npm:^8.9.0": - version: 8.16.0 - resolution: "ajv@npm:8.16.0" + version: 8.17.1 + resolution: "ajv@npm:8.17.1" dependencies: fast-deep-equal: ^3.1.3 + fast-uri: ^3.0.1 json-schema-traverse: ^1.0.0 require-from-string: ^2.0.2 - uri-js: ^4.4.1 - checksum: bdf3d4c9f1d11e220850051ef4cd89346e951cfb933d6d41be36d45053c1092af1523ee6c62525cce567355caf0a4f4c19a08a93851649c1fa32b4a39b7c4858 + checksum: 1797bf242cfffbaf3b870d13565bd1716b73f214bb7ada9a497063aada210200da36e3ed40237285f3255acc4feeae91b1fb183625331bad27da95973f7253d9 languageName: node linkType: hard @@ -4322,9 +4324,9 @@ __metadata: linkType: hard "assert-never@npm:^1.2.1": - version: 1.2.1 - resolution: "assert-never@npm:1.2.1" - checksum: ea4f1756d90f55254c4dc7a20d6c5d5bc169160562aefe3d8756b598c10e695daf568f21b6d6b12245d7f3782d3ff83ef6a01ab75d487adfc6909470a813bf8c + version: 1.3.0 + resolution: "assert-never@npm:1.3.0" + checksum: 7ba7b06433bb4155ed0e7e6be4c65dbf4b0221441beb761d6c418d5ac9e3bdd1f6db9c5eeffb895eaf31a388e21f23b2a4f99af3194f54c2ea0e93edab8a3d8c languageName: node linkType: hard @@ -5624,17 +5626,17 @@ __metadata: languageName: node linkType: hard -"browserslist@npm:^4.0.0, browserslist@npm:^4.14.5, browserslist@npm:^4.21.10, browserslist@npm:^4.22.2, browserslist@npm:^4.23.0": - version: 4.23.1 - resolution: "browserslist@npm:4.23.1" +"browserslist@npm:^4.0.0, browserslist@npm:^4.14.5, browserslist@npm:^4.21.10, browserslist@npm:^4.23.0, browserslist@npm:^4.23.1": + version: 4.23.2 + resolution: "browserslist@npm:4.23.2" dependencies: - caniuse-lite: ^1.0.30001629 - electron-to-chromium: ^1.4.796 + caniuse-lite: ^1.0.30001640 + electron-to-chromium: ^1.4.820 node-releases: ^2.0.14 - update-browserslist-db: ^1.0.16 + update-browserslist-db: ^1.1.0 bin: browserslist: cli.js - checksum: 06189e2d6666a203ce097cc0e713a40477d08420927b79af139211e5712f3cf676fdc4dd6af3aa493d47c09206a344b3420a8315577dbe88c58903132de9b0f5 + checksum: 8212af37f6ca6355da191cf2d4ad49bd0b82854888b9a7e103638fada70d38cbe36d28feeeaa98344cb15d9128f9f74bcc8ce1bfc9011b5fd14381c1c6fb542c languageName: node linkType: hard @@ -5709,8 +5711,8 @@ __metadata: linkType: hard "cacache@npm:^18.0.0": - version: 18.0.3 - resolution: "cacache@npm:18.0.3" + version: 18.0.4 + resolution: "cacache@npm:18.0.4" dependencies: "@npmcli/fs": ^3.1.0 fs-minipass: ^3.0.0 @@ -5724,7 +5726,7 @@ __metadata: ssri: ^10.0.0 tar: ^6.1.11 unique-filename: ^3.0.0 - checksum: b717fd9b36e9c3279bfde4545c3a8f6d5a539b084ee26a9504d48f83694beb724057d26e090b97540f9cc62bea18b9f6cf671c50e18fb7dac60eda9db691714f + checksum: b7422c113b4ec750f33beeca0f426a0024c28e3172f332218f48f963e5b970647fa1ac05679fe5bb448832c51efea9fda4456b9a95c3a1af1105fe6c1833cde2 languageName: node linkType: hard @@ -5841,10 +5843,10 @@ __metadata: languageName: node linkType: hard -"caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30001524, caniuse-lite@npm:^1.0.30001629": - version: 1.0.30001639 - resolution: "caniuse-lite@npm:1.0.30001639" - checksum: 0d9291cc47ffaad5806716bff6fef41eec21f86a448370bc30a72823fcaf24ba5ccb4704841e6a60f078ddf2e9987e3d23f4d3ca0fffc51f6cb0400b7411ad28 +"caniuse-lite@npm:^1.0.0, caniuse-lite@npm:^1.0.30001524, 
caniuse-lite@npm:^1.0.30001640": + version: 1.0.30001643 + resolution: "caniuse-lite@npm:1.0.30001643" + checksum: e39991c13a0fd8f5c2aa99c9128188e4c4e9d6a203c3da6270c36285460ef152c5e9410ee4db560aa723904668946afe50541dce9636ab5e61434ba71dc22955 languageName: node linkType: hard @@ -6248,9 +6250,9 @@ __metadata: linkType: hard "codemirror@npm:^5.58.2": - version: 5.65.16 - resolution: "codemirror@npm:5.65.16" - checksum: 1c5036bfffcce19b1ff91d8b158dcb45faba27047c4093f55ea7ad1165975179eb47c9ef604baa9c4f4ea6bf9817886c767f33e72fa9c62710404029be3c4744 + version: 5.65.17 + resolution: "codemirror@npm:5.65.17" + checksum: 8bc853524c6416826364d776b012f488b3f4736899e5c8026062f43927e09de773051dd1b34e8cfd25642d7e358679ca5b113f0034fdd6a295f4193b04f8c528 languageName: node linkType: hard @@ -6656,7 +6658,7 @@ __metadata: languageName: node linkType: hard -"core-js-compat@npm:^3.31.0, core-js-compat@npm:^3.36.1": +"core-js-compat@npm:^3.36.1, core-js-compat@npm:^3.37.1": version: 3.37.1 resolution: "core-js-compat@npm:3.37.1" dependencies: @@ -7141,6 +7143,16 @@ __metadata: languageName: node linkType: hard +"decorator-transforms@npm:^2.0.0": + version: 2.0.0 + resolution: "decorator-transforms@npm:2.0.0" + dependencies: + "@babel/plugin-syntax-decorators": ^7.23.3 + babel-import-util: ^3.0.0 + checksum: 1736a83181be2484e7eb5f1e7b60543712b6cbf25711dfc55e4a948ea4d10a7be8aef3d8011fb3f733ae61e983446ffa7ae88b02ae445c113406527097c70e1a + languageName: node + linkType: hard + "dedent@npm:^0.7.0": version: 0.7.0 resolution: "dedent@npm:0.7.0" @@ -7314,23 +7326,23 @@ __metadata: languageName: node linkType: hard -"dmd@npm:^6.2.0": - version: 6.2.0 - resolution: "dmd@npm:6.2.0" +"dmd@npm:^6.2.1": + version: 6.2.2 + resolution: "dmd@npm:6.2.2" dependencies: array-back: ^6.2.2 cache-point: ^2.0.0 common-sequence: ^2.0.2 - file-set: ^4.0.2 - handlebars: ^4.7.7 - marked: ^4.2.3 + fast-glob: ^3.3.2 + handlebars: ^4.7.8 + marked: ^4.3.0 object-get: ^2.1.1 reduce-flatten: ^3.0.1 reduce-unique: ^2.0.1 reduce-without: ^1.0.1 test-value: ^3.0.0 walk-back: ^5.1.0 - checksum: c10a10c466b78ed320cf2b3bd1a34fe39a2efbe0845db39158c61d85d1374cc072f2ad7e73ded2c9767828d5f2afaa96c73f88053aa9df50b3f344d6649a9893 + checksum: 4e84d3a0d3fcee163d8705f09d0607a022af177e0655eef41d16e325f9fab809d6d96340b73e7979ce94aed3e6889552cb9e446dbb2b2857c59f3f325c69c9bd languageName: node linkType: hard @@ -7431,9 +7443,9 @@ __metadata: linkType: hard "dompurify@npm:^3.0.2": - version: 3.1.5 - resolution: "dompurify@npm:3.1.5" - checksum: 18ae2930cba3c260889b99e312c382c344d219bd113bc39fbb665a61987d25849021768f490395e6954aab94448a24b3c3721c160b53550547110c37cebe9feb + version: 3.1.6 + resolution: "dompurify@npm:3.1.6" + checksum: cc4fc4ccd9261fbceb2a1627a985c70af231274a26ddd3f643fd0616a0a44099bd9e4480940ce3655612063be4a1fe9f5e9309967526f8c0a99f931602323866 languageName: node linkType: hard @@ -7519,20 +7531,20 @@ __metadata: languageName: node linkType: hard -"electron-to-chromium@npm:^1.4.796": - version: 1.4.816 - resolution: "electron-to-chromium@npm:1.4.816" - checksum: 5abaa04cee77af4889e68d7fd7305c50b98eaa9b4016b228c85de5713a933767e423e2e6bcd71007fff1c405c5bea79d6e9e9d18efddaa966040fe9e97f43e2e +"electron-to-chromium@npm:^1.4.820": + version: 1.4.832 + resolution: "electron-to-chromium@npm:1.4.832" + checksum: a1f71cf7665441d28cfe5ff31415d7a64036d83226c40322c1412de118091ad5010fd0da831dc04de115d978e91074756b7fbc9e7788e4f98888f0e194b5bdac languageName: node linkType: hard "ember-a11y-refocus@npm:^4.1.0": - version: 4.1.0 - resolution: 
"ember-a11y-refocus@npm:4.1.0" + version: 4.1.1 + resolution: "ember-a11y-refocus@npm:4.1.1" dependencies: ember-cli-babel: ^7.26.11 ember-cli-htmlbars: ^6.0.1 - checksum: 04d5cca73885e37d087dc214aec83e9d388e4b3b78cf5adc7fd09735fad547c65b792437f4915b392581529a6b1bb7d1d2b97c94483a162a80892487d9cc1551 + checksum: b9f861f1359e8c720bf844161da3eecbe2218149739211961d216b6fcaec5e78dfd51debe5ec2707ae0d31fdbbd9cf692349e3f0f5b47d1f12bd963c021494ac languageName: node linkType: hard @@ -8508,7 +8520,7 @@ __metadata: languageName: node linkType: hard -"ember-inflector@npm:4.0.2, ember-inflector@npm:^2.0.0 || ^3.0.0 || ^4.0.2, ember-inflector@npm:^4.0.2": +"ember-inflector@npm:4.0.2": version: 4.0.2 resolution: "ember-inflector@npm:4.0.2" dependencies: @@ -8517,6 +8529,17 @@ __metadata: languageName: node linkType: hard +"ember-inflector@npm:^2.0.0 || ^3.0.0 || ^4.0.2, ember-inflector@npm:^4.0.2": + version: 4.0.3 + resolution: "ember-inflector@npm:4.0.3" + dependencies: + ember-cli-babel: ^7.26.11 + peerDependencies: + ember-source: ^3.16.0 || ^4.0.0 || ^5.0.0 + checksum: b4dbd31e6f1141082cee70236aa5575dd32f9d1562599911fa920d34de458781df4f7a3aee108fd86062ecaa1337064345aee1e55271a3c1e1975947057ce037 + languageName: node + linkType: hard + "ember-keyboard@npm:^8.2.1": version: 8.2.1 resolution: "ember-keyboard@npm:8.2.1" @@ -8590,18 +8613,19 @@ __metadata: linkType: hard "ember-modifier@npm:^2.1.2 || ^3.1.0 || ^4.0.0, ember-modifier@npm:^3.2.7 || ^4.0.0, ember-modifier@npm:^4.1.0": - version: 4.1.0 - resolution: "ember-modifier@npm:4.1.0" + version: 4.2.0 + resolution: "ember-modifier@npm:4.2.0" dependencies: - "@embroider/addon-shim": ^1.8.4 + "@embroider/addon-shim": ^1.8.7 + decorator-transforms: ^2.0.0 ember-cli-normalize-entity-name: ^1.0.0 ember-cli-string-utils: ^1.1.0 peerDependencies: - ember-source: "*" + ember-source: ^3.24 || >=4.0 peerDependenciesMeta: ember-source: optional: true - checksum: 5e14a864de2184c07e59fb9bc76a09ae25d1bd37722a94751a3cef6165df22027007696c4dda03e1862cb3bbeefb046772810dda3104d05c7fe8476389c34a77 + checksum: 5fcea62029ddc880cbfc519f9e5abf33564ecce3c5cbdd54b6b293313df558febe171de5308e5b0d81da284b6914e169412d9e1893e48a45dc51f77214efe65d languageName: node linkType: hard @@ -8783,16 +8807,17 @@ __metadata: linkType: hard "ember-sinon-qunit@npm:^7.4.0": - version: 7.4.0 - resolution: "ember-sinon-qunit@npm:7.4.0" + version: 7.5.0 + resolution: "ember-sinon-qunit@npm:7.5.0" dependencies: - "@embroider/addon-shim": ^1.8.6 - "@types/sinon": ^10.0.19 + "@embroider/addon-shim": ^1.8.9 + "@types/sinon": ^17.0.3 + decorator-transforms: ^2.0.0 peerDependencies: ember-source: ">=3.28.0" qunit: ^2.0.0 - sinon: ^15.0.3 || ^16.0.0 || ^17.0.0 - checksum: e83cd2113001f670d125e7ad186ca276fdcb74a0de44f296275c4e75ad4832d89103bc9ee23e3f5143d82bb01345313bb6a1f5b070f1b010cdd435d24e418105 + sinon: ">=15.0.3" + checksum: d6ad80c1b28676f055b4e2b0638913c1ed6c721f1ffc839f185e48ceab9a5cbd3088de4a88d667fd2b82bb3109ea43d85efa12e463fe47276aa04b34589e31bd languageName: node linkType: hard @@ -8879,17 +8904,17 @@ __metadata: linkType: hard "ember-style-modifier@npm:^4.1.0, ember-style-modifier@npm:^4.3.1": - version: 4.3.1 - resolution: "ember-style-modifier@npm:4.3.1" + version: 4.4.0 + resolution: "ember-style-modifier@npm:4.4.0" dependencies: "@embroider/addon-shim": ^1.8.7 csstype: ^3.1.3 - decorator-transforms: ^1.0.1 + decorator-transforms: ^2.0.0 ember-modifier: ^3.2.7 || ^4.0.0 peerDependencies: - "@ember/string": ^3.0.1 + "@ember/string": ^3.1.1 || ^4.0.0 ember-source: ^3.28.0 || ^4.0.0 || 
>=5.0.0 - checksum: a60cf8d8718e7e45cbf67c3520c9e956d519eeb552f4760420817116b55db176982a1af2dcb6d8913c66681cc9cfe3cf039f812532a4d19505a365f49d117ea9 + checksum: 0027a0c842f9db024781e32e3a04564a1ef6a14e1ac95aac63bcac8935c4277f10f21cc0d31c60ef221ac944fac6ed3b9e9cd40c29993dea1bc4fb7d7a51b912 languageName: node linkType: hard @@ -9123,9 +9148,9 @@ __metadata: linkType: hard "engine.io-parser@npm:~5.2.1": - version: 5.2.2 - resolution: "engine.io-parser@npm:5.2.2" - checksum: 470231215f3136a9259efb1268bc9a71f789af4e8c74da8d3b49ceb149fe3cd5c315bf0cd13d2d8d9c8f0f051c6f93b68e8fa9c89a3b612b9217bf33765c943a + version: 5.2.3 + resolution: "engine.io-parser@npm:5.2.3" + checksum: a76d998b794ce8bbcade833064d949715781fdb9e9cf9b33ecf617d16355ddfd7772f12bb63aaec0f497d63266c6db441129c5aa24c60582270f810c696a6cf8 languageName: node linkType: hard @@ -9514,11 +9539,11 @@ __metadata: linkType: hard "eslint-plugin-prettier@npm:^5.0.1": - version: 5.1.3 - resolution: "eslint-plugin-prettier@npm:5.1.3" + version: 5.2.1 + resolution: "eslint-plugin-prettier@npm:5.2.1" dependencies: prettier-linter-helpers: ^1.0.0 - synckit: ^0.8.6 + synckit: ^0.9.1 peerDependencies: "@types/eslint": ">=8.0.0" eslint: ">=8.0.0" @@ -9529,7 +9554,7 @@ __metadata: optional: true eslint-config-prettier: optional: true - checksum: eb2a7d46a1887e1b93788ee8f8eb81e0b6b2a6f5a66a62bc6f375b033fc4e7ca16448da99380be800042786e76cf5c0df9c87a51a2c9b960ed47acbd7c0b9381 + checksum: 812f4d1596dcd3a55963212dfbd818a4b38f880741aac75f6869aa740dc5d934060674d3b85d10ff9fec424defa61967dbdef26b8a893a92c9b51880264ed0d9 languageName: node linkType: hard @@ -9680,11 +9705,11 @@ __metadata: linkType: hard "esquery@npm:^1.4.0, esquery@npm:^1.4.2": - version: 1.5.0 - resolution: "esquery@npm:1.5.0" + version: 1.6.0 + resolution: "esquery@npm:1.6.0" dependencies: estraverse: ^5.1.0 - checksum: aefb0d2596c230118656cd4ec7532d447333a410a48834d80ea648b1e7b5c9bc9ed8b5e33a89cb04e487b60d622f44cf5713bf4abed7c97343edefdc84a35900 + checksum: 08ec4fe446d9ab27186da274d979558557fbdbbd10968fa9758552482720c54152a5640e08b9009e5a30706b66aba510692054d4129d32d0e12e05bbc0b96fb2 languageName: node linkType: hard @@ -10052,6 +10077,13 @@ __metadata: languageName: node linkType: hard +"fast-uri@npm:^3.0.1": + version: 3.0.1 + resolution: "fast-uri@npm:3.0.1" + checksum: 106143ff83705995225dcc559411288f3337e732bb2e264e79788f1914b6bd8f8bc3683102de60b15ba00e6ebb443633cabac77d4ebc5cb228c47cf955e199ff + languageName: node + linkType: hard + "fastest-levenshtein@npm:^1.0.16": version: 1.0.16 resolution: "fastest-levenshtein@npm:1.0.16" @@ -10131,20 +10163,10 @@ __metadata: languageName: node linkType: hard -"file-set@npm:^4.0.2": - version: 4.0.2 - resolution: "file-set@npm:4.0.2" - dependencies: - array-back: ^5.0.0 - glob: ^7.1.6 - checksum: 6eacb9df4a0a95fbfb09e8ccdf8c3ef7f30de8c0b5043ff0530a79c0c0003550a0725eba517a5c7fe1c452df57c3b4d506e91859ccc248aebb2f038790eb66e6 - languageName: node - linkType: hard - "filesize@npm:^10.0.8": - version: 10.1.2 - resolution: "filesize@npm:10.1.2" - checksum: 584cd30415e27e19effd27da7178b7b95dbab065d7b954a5cd763318db55afc12f36077aeea3c22c94849d66d0c82ea48e644c66d339f224e8e928e666aa3e4a + version: 10.1.4 + resolution: "filesize@npm:10.1.4" + checksum: b54949fb1a2ecf2407afeb08f943f59a81da382a83ad2b8472ca2a64ba08345ecd489cb44914f44e48dd125c3658f19687d2d4920ae4505e6356f1054c139dcf languageName: node linkType: hard @@ -10893,11 +10915,11 @@ __metadata: linkType: hard "get-tsconfig@npm:^4.7.0": - version: 4.7.5 - resolution: "get-tsconfig@npm:4.7.5" + version: 
4.7.6 + resolution: "get-tsconfig@npm:4.7.6" dependencies: resolve-pkg-maps: ^1.0.0 - checksum: e5b271fae2b4cd1869bbfc58db56983026cc4a08fdba988725a6edd55d04101507de154722503a22ee35920898ff9bdcba71f99d93b17df35dddb8e8a2ad91be + checksum: ebfd86f0b356cde98e2a7afe63b58d92e02b8e413ff95551933d277702bf725386ee82c5c0092fe45fb2ba60002340c94ee70777b3220bbfeca83ab45dda1544 languageName: node linkType: hard @@ -10955,8 +10977,8 @@ __metadata: linkType: hard "glob@npm:^10.2.2, glob@npm:^10.3.10, glob@npm:^10.3.7": - version: 10.4.2 - resolution: "glob@npm:10.4.2" + version: 10.4.5 + resolution: "glob@npm:10.4.5" dependencies: foreground-child: ^3.1.0 jackspeak: ^3.1.2 @@ -10966,7 +10988,7 @@ __metadata: path-scurry: ^1.11.1 bin: glob: dist/esm/bin.mjs - checksum: bd7c0e30701136e936f414e5f6f82c7f04503f01df77408f177aa584927412f0bde0338e6ec541618cd21eacc57dde33e7b3c6c0a779cc1c6e6a0e14f3d15d9b + checksum: 0bc725de5e4862f9f387fd0f2b274baf16850dcd2714502ccf471ee401803997983e2c05590cb65f9675a3c6f2a58e7a53f9e365704108c6ad3cbf1d60934c4a languageName: node linkType: hard @@ -11212,7 +11234,7 @@ __metadata: languageName: node linkType: hard -"handlebars@npm:^4.0.11, handlebars@npm:^4.0.4, handlebars@npm:^4.3.1, handlebars@npm:^4.7.3, handlebars@npm:^4.7.7": +"handlebars@npm:^4.0.11, handlebars@npm:^4.0.4, handlebars@npm:^4.3.1, handlebars@npm:^4.7.3, handlebars@npm:^4.7.8": version: 4.7.8 resolution: "handlebars@npm:4.7.8" dependencies: @@ -11676,9 +11698,9 @@ __metadata: linkType: hard "immutable@npm:^4.0.0": - version: 4.3.6 - resolution: "immutable@npm:4.3.6" - checksum: 3afd020be988ec9ba42c1e585b88858970beba91332ac04ac11446722c7e5da03d5956f5049806573d29dfee25f69262297cb7f3bd6b16fc83a175a0176c6c2a + version: 4.3.7 + resolution: "immutable@npm:4.3.7" + checksum: 1c50eb053bb300796551604afff554066f041aa8e15926cf98f6d11d9736b62ad12531c06515dd96375258653878b4736f8051cd20b640f5f976d09fa640e3ec languageName: node linkType: hard @@ -11830,8 +11852,8 @@ __metadata: linkType: hard "inquirer@npm:^9.1.5": - version: 9.3.2 - resolution: "inquirer@npm:9.3.2" + version: 9.3.6 + resolution: "inquirer@npm:9.3.6" dependencies: "@inquirer/figures": ^1.0.3 ansi-escapes: ^4.3.2 @@ -11844,8 +11866,8 @@ __metadata: string-width: ^4.2.3 strip-ansi: ^6.0.1 wrap-ansi: ^6.2.0 - yoctocolors-cjs: ^2.1.1 - checksum: 8a606d400bfc8ce5a3fd70ce38a158327d7f65274cadce25acdfdf93e90aedfaa7b705b7929a10510b928c76c70bb39ca4e566e23620d45ce5c91b2334190f95 + yoctocolors-cjs: ^2.1.2 + checksum: f1fd086585e301ec17ce016355e9eb6eb87329c6de578cde35b10d5e4b57443b9f8f1f304d3ab570e5dad2cbc55851c476480296e15793f76836c0c33cf2e713 languageName: node linkType: hard @@ -11986,11 +12008,11 @@ __metadata: linkType: hard "is-core-module@npm:^2.12.1, is-core-module@npm:^2.13.0, is-core-module@npm:^2.5.0": - version: 2.14.0 - resolution: "is-core-module@npm:2.14.0" + version: 2.15.0 + resolution: "is-core-module@npm:2.15.0" dependencies: hasown: ^2.0.2 - checksum: 6bba6c8dc99d88d6f3b2746709d82caddcd9565cafd5870e28ab320720e27e6d9d2bb953ba0839ed4d2ee264bfdd14a9fa1bbc242a916f7dacc8aa95f0322256 + checksum: a9f7a52707c9b59d7164094d183bda892514fc3ba3139f245219c7abe7f6e8d3e2cdcf861f52a891a467f785f1dfa5d549f73b0ee715f4ba56e8882d335ea585 languageName: node linkType: hard @@ -12444,15 +12466,15 @@ __metadata: linkType: hard "jackspeak@npm:^3.1.2": - version: 3.4.0 - resolution: "jackspeak@npm:3.4.0" + version: 3.4.3 + resolution: "jackspeak@npm:3.4.3" dependencies: "@isaacs/cliui": ^8.0.2 "@pkgjs/parseargs": ^0.11.0 dependenciesMeta: "@pkgjs/parseargs": optional: true - checksum: 
350f6f311018bb175ffbe736b19c26ac0b134bb5a17a638169e89594eb0c24ab1c658ab3a2fda24ff63b3b19292e1a5ec19d2255bc526df704e8168d392bef85 + checksum: be31027fc72e7cc726206b9f560395604b82e0fddb46c4cbf9f97d049bcef607491a5afc0699612eaa4213ca5be8fd3e1e7cd187b3040988b65c9489838a7c00 languageName: node linkType: hard @@ -12527,20 +12549,19 @@ __metadata: languageName: node linkType: hard -"jsdoc-api@npm:^8.0.0": - version: 8.0.0 - resolution: "jsdoc-api@npm:8.0.0" +"jsdoc-api@npm:^8.1.0": + version: 8.1.0 + resolution: "jsdoc-api@npm:8.1.0" dependencies: array-back: ^6.2.2 cache-point: ^2.0.0 collect-all: ^1.0.4 - file-set: ^4.0.2 fs-then-native: ^2.0.0 - jsdoc: ^4.0.0 + jsdoc: ^4.0.3 object-to-spawn-args: ^2.0.1 temp-path: ^1.0.0 walk-back: ^5.1.0 - checksum: 048d414895bf1fc4badba21385f43fe771f7f88e936b0e900744b0f9de67d0db224a51ea8df43c9aa92779cd220dfc1c042d06a5cd534f1eb35ad9397d5492ea + checksum: 1c87990b12899e9f491cc66a0a02579b0d9864e17b63e94b1d2590658e4f58a09c7a7017c461cc9692b97a1345e4c700b3fade6b8ca7aab2084b94412e164b57 languageName: node linkType: hard @@ -12577,23 +12598,23 @@ __metadata: linkType: hard "jsdoc-to-markdown@npm:^8.0.1": - version: 8.0.1 - resolution: "jsdoc-to-markdown@npm:8.0.1" + version: 8.0.2 + resolution: "jsdoc-to-markdown@npm:8.0.2" dependencies: array-back: ^6.2.2 command-line-tool: ^0.8.0 config-master: ^3.1.0 - dmd: ^6.2.0 - jsdoc-api: ^8.0.0 + dmd: ^6.2.1 + jsdoc-api: ^8.1.0 jsdoc-parse: ^6.2.1 walk-back: ^5.1.0 bin: jsdoc2md: bin/cli.js - checksum: 5423f82bb801276a1344b554b42df43683b2c2406421bc3eefff67d768d2d8d465b1e13ea3c7666e0d831dff7faca50820a179ba51ced671dd819751e5c3c9c4 + checksum: 92e110fcfb48807182cd2ce7d19c7523549b332970bdcff13feb10d2d72d45f062f3e178e02c22c2628dbde79783ed7e41a77671eeaa84cd5942d4f6abf8f3bb languageName: node linkType: hard -"jsdoc@npm:^4.0.0": +"jsdoc@npm:^4.0.3": version: 4.0.3 resolution: "jsdoc@npm:4.0.3" dependencies: @@ -13311,9 +13332,9 @@ __metadata: linkType: hard "lru-cache@npm:^10.0.1, lru-cache@npm:^10.2.0": - version: 10.3.0 - resolution: "lru-cache@npm:10.3.0" - checksum: f2289639bd94cf3c87bfd8a77ac991f9afe3af004ddca3548c3dae63ead1c73bba449a60a4e270992e16cf3261b3d4130943234d52ca3a4d4de2fc074a3cc7b5 + version: 10.4.3 + resolution: "lru-cache@npm:10.4.3" + checksum: 6476138d2125387a6d20f100608c2583d415a4f64a0fecf30c9e2dda976614f09cad4baa0842447bd37dd459a7bd27f57d9d8f8ce558805abd487c583f3d774a languageName: node linkType: hard @@ -13527,7 +13548,7 @@ __metadata: languageName: node linkType: hard -"marked@npm:^4.0.10, marked@npm:^4.2.3": +"marked@npm:^4.0.10, marked@npm:^4.3.0": version: 4.3.0 resolution: "marked@npm:4.3.0" bin: @@ -13970,13 +13991,20 @@ __metadata: languageName: node linkType: hard -"mime-db@npm:1.52.0, mime-db@npm:>= 1.43.0 < 2": +"mime-db@npm:1.52.0": version: 1.52.0 resolution: "mime-db@npm:1.52.0" checksum: 0d99a03585f8b39d68182803b12ac601d9c01abfa28ec56204fa330bc9f3d1c5e14beb049bafadb3dbdf646dfb94b87e24d4ec7b31b7279ef906a8ea9b6a513f languageName: node linkType: hard +"mime-db@npm:>= 1.43.0 < 2": + version: 1.53.0 + resolution: "mime-db@npm:1.53.0" + checksum: 3fd9380bdc0b085d0b56b580e4f89ca4fc3b823722310d795c248f0806b9a80afd5d8f4347f015ad943b9ecfa7cc0b71dffa0db96fa776d01a13474821a2c7fb + languageName: node + linkType: hard + "mime-types@npm:^2.1.18, mime-types@npm:^2.1.19, mime-types@npm:^2.1.26, mime-types@npm:^2.1.27, mime-types@npm:~2.1.24, mime-types@npm:~2.1.34": version: 2.1.35 resolution: "mime-types@npm:2.1.35" @@ -14447,8 +14475,8 @@ __metadata: linkType: hard "node-gyp@npm:latest": - version: 10.1.0 - 
resolution: "node-gyp@npm:10.1.0" + version: 10.2.0 + resolution: "node-gyp@npm:10.2.0" dependencies: env-paths: ^2.2.0 exponential-backoff: ^3.1.1 @@ -14456,13 +14484,13 @@ __metadata: graceful-fs: ^4.2.6 make-fetch-happen: ^13.0.0 nopt: ^7.0.0 - proc-log: ^3.0.0 + proc-log: ^4.1.0 semver: ^7.3.5 - tar: ^6.1.2 + tar: ^6.2.1 which: ^4.0.0 bin: node-gyp: bin/node-gyp.js - checksum: 72e2ab4b23fc32007a763da94018f58069fc0694bf36115d49a2b195c8831e12cf5dd1e7a3718fa85c06969aedf8fc126722d3b672ec1cb27e06ed33caee3c60 + checksum: 0233759d8c19765f7fdc259a35eb046ad86c3d09e22f7384613ae2b89647dd27fcf833fdf5293d9335041e91f9b1c539494225959cdb312a5c8080b7534b926f languageName: node linkType: hard @@ -14495,9 +14523,9 @@ __metadata: linkType: hard "node-releases@npm:^2.0.14": - version: 2.0.14 - resolution: "node-releases@npm:2.0.14" - checksum: 59443a2f77acac854c42d321bf1b43dea0aef55cd544c6a686e9816a697300458d4e82239e2d794ea05f7bbbc8a94500332e2d3ac3f11f52e4b16cbe638b3c41 + version: 2.0.18 + resolution: "node-releases@npm:2.0.18" + checksum: ef55a3d853e1269a6d6279b7692cd6ff3e40bc74947945101138745bfdc9a5edabfe72cb19a31a8e45752e1910c4c65c77d931866af6357f242b172b7283f5b3 languageName: node linkType: hard @@ -15425,12 +15453,12 @@ __metadata: linkType: hard "postcss-selector-parser@npm:^6.0.13, postcss-selector-parser@npm:^6.0.2, postcss-selector-parser@npm:^6.0.4": - version: 6.1.0 - resolution: "postcss-selector-parser@npm:6.1.0" + version: 6.1.1 + resolution: "postcss-selector-parser@npm:6.1.1" dependencies: cssesc: ^3.0.0 util-deprecate: ^1.0.2 - checksum: 449f614e6706421be307d8638183c61ba45bc3b460fe3815df8971dbb4d59c4087181940d879daee4a7a2daf3d86e915db1cce0c006dd68ca75b4087079273bd + checksum: 1c6a5adfc3c19c6e1e7d94f8addb89a5166fcca72c41f11713043d381ecbe82ce66360c5524e904e17b54f7fc9e6a077994ff31238a456bc7320c3e02e88d92e languageName: node linkType: hard @@ -15576,7 +15604,7 @@ __metadata: languageName: node linkType: hard -"proc-log@npm:^4.2.0": +"proc-log@npm:^4.1.0, proc-log@npm:^4.2.0": version: 4.2.0 resolution: "proc-log@npm:4.2.0" checksum: 98f6cd012d54b5334144c5255ecb941ee171744f45fca8b43b58ae5a0c1af07352475f481cadd9848e7f0250376ee584f6aa0951a856ff8f021bdfbff4eb33fc @@ -15725,11 +15753,11 @@ __metadata: linkType: hard "qs@npm:^6.4.0": - version: 6.12.2 - resolution: "qs@npm:6.12.2" + version: 6.12.3 + resolution: "qs@npm:6.12.3" dependencies: side-channel: ^1.0.6 - checksum: cb141456f3e518b4212177f5658168acbab60c90735f27f131336f7ae0286b51402911d4a0a786d83d3ba4aa801c032383b4304b28474de00388eb95cf988c8c + checksum: 9a9228a623bc36d41648237667d7342fb8d64d1cfeb29e474b0c44591ba06ac507e2d726f60eca5af8dc420e5dd23370af408ef8c28e0405675c7187b736a693 languageName: node linkType: hard @@ -15778,15 +15806,15 @@ __metadata: linkType: hard "qunit@npm:^2.20.0": - version: 2.21.0 - resolution: "qunit@npm:2.21.0" + version: 2.21.1 + resolution: "qunit@npm:2.21.1" dependencies: commander: 7.2.0 node-watch: 0.7.3 tiny-glob: 0.2.9 bin: qunit: bin/qunit.js - checksum: 7d0ddfcd2f47347924e5e031d83c9cf1d884283d3dc8b800413784ede55d6680fa0d166bbe3ac1eacbce0a60dd379b4d98bf2f0452abd5a2d50aafc795e7497a + checksum: 51d7c323ef858847cb4fb8b3466e1a26635cbd1b5dbce69e910bf5c1e6a75710d62e4021bb6dbcc787d955522020e4c54c1a8853bc8dc9e9ef8a4c8cf4b76a07 languageName: node linkType: hard @@ -16408,13 +16436,13 @@ __metadata: linkType: hard "rimraf@npm:^5.0.0": - version: 5.0.7 - resolution: "rimraf@npm:5.0.7" + version: 5.0.9 + resolution: "rimraf@npm:5.0.9" dependencies: glob: ^10.3.7 bin: rimraf: dist/esm/bin.mjs - checksum: 
884852abf8aefd4667448d87bdab04120a8641266c828cf382ac811713547eda18f81799d2146ffec3178f357d83d44ec01c10095949c82e23551660732bf14f + checksum: e6dd5007e34181e1fa732437499d798035b2f3313887435cb855c5c9055bf9646795fc1c63ef843de830df8577cd9862df2dabf913fe08dcc1758c96de4a4fdb languageName: node linkType: hard @@ -16680,15 +16708,15 @@ __metadata: linkType: hard "sass@npm:^1.66.3, sass@npm:^1.69.5": - version: 1.77.6 - resolution: "sass@npm:1.77.6" + version: 1.77.8 + resolution: "sass@npm:1.77.8" dependencies: chokidar: ">=3.0.0 <4.0.0" immutable: ^4.0.0 source-map-js: ">=0.6.2 <2.0.0" bin: sass: sass.js - checksum: 9bd1cb9ec1f10b7df83ed6a4b3d8764fe9174ee422f1ea21c51bcd953f710deee57c649269f9cb1ad1e9dcc3b87efee62cd2b36aca9cc646d44fd9179300d5f3 + checksum: 6b5dce17faa1bd1e349b4825bf7f76559a32f3f95d789cd2847623c88ee9635e1485d3458532a05fa5b9134cfbce79a4bad3f13dc63c2433632347674db0abae languageName: node linkType: hard @@ -16759,11 +16787,11 @@ __metadata: linkType: hard "semver@npm:^7.0.0, semver@npm:^7.3.2, semver@npm:^7.3.4, semver@npm:^7.3.5, semver@npm:^7.3.7, semver@npm:^7.3.8, semver@npm:^7.5.2, semver@npm:^7.5.3, semver@npm:^7.5.4": - version: 7.6.2 - resolution: "semver@npm:7.6.2" + version: 7.6.3 + resolution: "semver@npm:7.6.3" bin: semver: bin/semver.js - checksum: 40f6a95101e8d854357a644da1b8dd9d93ce786d5c6a77227bc69dbb17bea83d0d1d1d7c4cd5920a6df909f48e8bd8a5909869535007f90278289f2451d0292d + checksum: 4110ec5d015c9438f322257b1c51fe30276e5f766a3f64c09edd1d7ea7118ecbc3f379f3b69032bacf13116dc7abc4ad8ce0d7e2bd642e26b0d271b56b61a7d8 languageName: node linkType: hard @@ -17858,13 +17886,13 @@ __metadata: languageName: node linkType: hard -"synckit@npm:^0.8.6": - version: 0.8.8 - resolution: "synckit@npm:0.8.8" +"synckit@npm:^0.9.1": + version: 0.9.1 + resolution: "synckit@npm:0.9.1" dependencies: "@pkgr/core": ^0.1.0 tslib: ^2.6.2 - checksum: 9ed5d33abb785f5f24e2531efd53b2782ca77abf7912f734d170134552b99001915531be5a50297aa45c5701b5c9041e8762e6cd7a38e41e2461c1e7fccdedf8 + checksum: 4042941a4d939675f1d7b01124b8405b6ac616f3e3f396d00e46c67f38d0d5b7f9a1de05bc7ceea4ce80d967b450cfa2460e5f6aca81f7cea8f1a28be9392985 languageName: node linkType: hard @@ -17921,7 +17949,7 @@ __metadata: languageName: node linkType: hard -"tar@npm:^6.1.11, tar@npm:^6.1.2": +"tar@npm:^6.1.11, tar@npm:^6.2.1": version: 6.2.1 resolution: "tar@npm:6.2.1" dependencies: @@ -17988,8 +18016,8 @@ __metadata: linkType: hard "terser@npm:^5.26.0, terser@npm:^5.7.0": - version: 5.31.1 - resolution: "terser@npm:5.31.1" + version: 5.31.3 + resolution: "terser@npm:5.31.3" dependencies: "@jridgewell/source-map": ^0.3.3 acorn: ^8.8.2 @@ -17997,7 +18025,7 @@ __metadata: source-map-support: ~0.5.20 bin: terser: bin/terser - checksum: 6ab57e62e9cd690dc99b3d0ee2e07289cd3408109a950c7118bf39e32851a5bf08b67fe19e0ac43a5a98813792ac78101bf25e5aa524f05ae8bb4e0131d0feef + checksum: cb4ccd5cb42c719272959dcae63d41e4696fb304123392943282caa6dfcdc49f94e7c48353af8bcd4fbc34457b240b7f843db7fec21bb2bdc18e01d4f45b035e languageName: node linkType: hard @@ -18576,11 +18604,11 @@ __metadata: linkType: hard "uglify-js@npm:^3.1.4": - version: 3.18.0 - resolution: "uglify-js@npm:3.18.0" + version: 3.19.0 + resolution: "uglify-js@npm:3.19.0" bin: uglifyjs: bin/uglifyjs - checksum: 887733d05d4139a94dffd04a5f07ee7d8be70201c016ea48cb82703778b5c48fadbe6e5e7ac956425522f72e657d3eade23f06ae8a0e2eeed2d684bf6cc25e36 + checksum: 23dc4778a9c5b5252888f3871e34b4a5e69ccc92e0febd9598c82cb559a7d550244ebc3f10eb0af0586c7cc34afe8be99d1581d9fcd36e3bed219d28d0fd3452 languageName: node 
linkType: hard @@ -18824,9 +18852,9 @@ __metadata: languageName: node linkType: hard -"update-browserslist-db@npm:^1.0.16": - version: 1.0.16 - resolution: "update-browserslist-db@npm:1.0.16" +"update-browserslist-db@npm:^1.1.0": + version: 1.1.0 + resolution: "update-browserslist-db@npm:1.1.0" dependencies: escalade: ^3.1.2 picocolors: ^1.0.1 @@ -18834,7 +18862,7 @@ __metadata: browserslist: ">= 4.21.0" bin: update-browserslist-db: cli.js - checksum: 51b1f7189c9ea5925c80154b0a6fd3ec36106d07858d8f69826427d8edb4735d1801512c69eade38ba0814d7407d11f400d74440bbf3da0309f3d788017f35b2 + checksum: 7b74694d96f0c360f01b702e72353dc5a49df4fe6663d3ee4e5c628f061576cddf56af35a3a886238c01dd3d8f231b7a86a8ceaa31e7a9220ae31c1c1238e562 languageName: node linkType: hard @@ -18845,7 +18873,7 @@ __metadata: languageName: node linkType: hard -"uri-js@npm:^4.2.2, uri-js@npm:^4.4.1": +"uri-js@npm:^4.2.2": version: 4.4.1 resolution: "uri-js@npm:4.4.1" dependencies: @@ -18993,7 +19021,7 @@ __metadata: "@ember/test-waiters": ^3.1.0 "@glimmer/component": ^1.1.2 "@glimmer/tracking": ^1.1.2 - "@hashicorp/design-system-components": ^4.1.0 + "@hashicorp/design-system-components": ^4.6.0 "@hashicorp/ember-flight-icons": ^5.0.1 "@icholy/duration": ^5.1.0 "@lineal-viz/lineal": ^0.5.1 @@ -19718,10 +19746,10 @@ __metadata: languageName: node linkType: hard -"yoctocolors-cjs@npm:^2.1.1": - version: 2.1.1 - resolution: "yoctocolors-cjs@npm:2.1.1" - checksum: e286a6651f7863316552dacc4cec89e028e8bfd3d2fd9ea60dcf03068c2325bd440df67cc566a4b252dbf348481e644a411be2736a990fc3cbd684f00f1f8349 +"yoctocolors-cjs@npm:^2.1.2": + version: 2.1.2 + resolution: "yoctocolors-cjs@npm:2.1.2" + checksum: 1c474d4b30a8c130e679279c5c2c33a0d48eba9684ffa0252cc64846c121fb56c3f25457fef902edbe1e2d7a7872130073a9fc8e795299d75e13fa3f5f548f1b languageName: node linkType: hard diff --git a/vault/activity/test_fixtures/aug.csv b/vault/activity/test_fixtures/aug.csv index 575a8953e801..66b63c563955 100644 --- a/vault/activity/test_fixtures/aug.csv +++ b/vault/activity/test_fixtures/aug.csv @@ -1,21 +1,21 @@ -client_id,namespace_id,timestamp,non_entity,mount_accessor -111122222-3333-4444-5555-000000000000,root,1,false,auth_1 -111122222-3333-4444-5555-000000000001,root,1,false,auth_1 -111122222-3333-4444-5555-000000000002,root,1,false,auth_1 -111122222-3333-4444-5555-000000000003,root,1,false,auth_1 -111122222-3333-4444-5555-000000000004,root,1,false,auth_1 -111122222-3333-4444-5555-000000000005,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000006,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000007,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000008,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000009,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000010,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000011,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000012,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000013,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000014,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000015,root,2,false,auth_4 -111122222-3333-4444-5555-000000000016,root,2,false,auth_4 -111122222-3333-4444-5555-000000000017,root,2,false,auth_4 -111122222-3333-4444-5555-000000000018,root,2,false,auth_4 -111122222-3333-4444-5555-000000000019,root,2,false,auth_4 +client_id,client_type,namespace_id,namespace_path,mount_accessor,timestamp +111122222-3333-4444-5555-000000000000,entity,root,,auth_1,"1970-01-01T00:00:01Z" 
+111122222-3333-4444-5555-000000000001,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000002,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000003,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000004,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000005,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000006,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000007,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000008,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000009,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000010,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000011,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000012,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000013,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000014,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000015,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000016,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000017,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000018,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000019,entity,root,,auth_4,"1970-01-01T00:00:02Z" diff --git a/vault/activity/test_fixtures/aug.json b/vault/activity/test_fixtures/aug.json index 17f117ab2b6f..cd8aa31ed949 100644 --- a/vault/activity/test_fixtures/aug.json +++ b/vault/activity/test_fixtures/aug.json @@ -1,20 +1,20 @@ -{"client_id":"111122222-3333-4444-5555-000000000000","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000001","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000002","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000003","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000004","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000005","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000006","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000007","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000008","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000009","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000010","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000011","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000012","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000013","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} 
-{"client_id":"111122222-3333-4444-5555-000000000014","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000015","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000016","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000017","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000018","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000019","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000000","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000001","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000002","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000003","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000004","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000005","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000006","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000007","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000008","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000009","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000010","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000011","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000012","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000013","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000014","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} 
+{"client_id":"111122222-3333-4444-5555-000000000015","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000016","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000017","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000018","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000019","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} diff --git a/vault/activity/test_fixtures/aug_oct.csv b/vault/activity/test_fixtures/aug_oct.csv index d7a3848b1710..0df8ba3ac3c5 100644 --- a/vault/activity/test_fixtures/aug_oct.csv +++ b/vault/activity/test_fixtures/aug_oct.csv @@ -1,41 +1,41 @@ -client_id,namespace_id,timestamp,non_entity,mount_accessor -111122222-3333-4444-5555-000000000000,root,1,false,auth_1 -111122222-3333-4444-5555-000000000001,root,1,false,auth_1 -111122222-3333-4444-5555-000000000002,root,1,false,auth_1 -111122222-3333-4444-5555-000000000003,root,1,false,auth_1 -111122222-3333-4444-5555-000000000004,root,1,false,auth_1 -111122222-3333-4444-5555-000000000005,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000006,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000007,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000008,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000009,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000010,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000011,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000012,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000013,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000014,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000015,root,2,false,auth_4 -111122222-3333-4444-5555-000000000016,root,2,false,auth_4 -111122222-3333-4444-5555-000000000017,root,2,false,auth_4 -111122222-3333-4444-5555-000000000018,root,2,false,auth_4 -111122222-3333-4444-5555-000000000019,root,2,false,auth_4 -111122222-3333-4444-5555-000000000020,root,3,false,auth_5 -111122222-3333-4444-5555-000000000021,root,3,false,auth_5 -111122222-3333-4444-5555-000000000022,root,3,false,auth_5 -111122222-3333-4444-5555-000000000023,root,3,false,auth_5 -111122222-3333-4444-5555-000000000024,root,3,false,auth_5 -111122222-3333-4444-5555-000000000025,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000026,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000027,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000028,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000029,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000030,root,4,false,auth_7 -111122222-3333-4444-5555-000000000031,root,4,false,auth_7 -111122222-3333-4444-5555-000000000032,root,4,false,auth_7 -111122222-3333-4444-5555-000000000033,root,4,false,auth_7 -111122222-3333-4444-5555-000000000034,root,4,false,auth_7 -111122222-3333-4444-5555-000000000035,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000036,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000037,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000038,bbbbb,4,false,auth_8 
-111122222-3333-4444-5555-000000000039,bbbbb,4,false,auth_8 +client_id,client_type,namespace_id,namespace_path,mount_accessor,timestamp +111122222-3333-4444-5555-000000000000,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000001,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000002,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000003,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000004,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000005,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000006,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000007,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000008,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000009,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000010,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000011,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000012,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000013,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000014,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000015,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000016,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000017,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000018,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000019,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000020,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000021,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000022,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000023,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000024,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000025,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000026,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000027,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000028,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000029,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000030,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000031,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000032,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000033,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000034,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000035,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000036,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000037,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000038,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" 
+111122222-3333-4444-5555-000000000039,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" diff --git a/vault/activity/test_fixtures/aug_oct.json b/vault/activity/test_fixtures/aug_oct.json index 011b9f6d8586..f5f682c7c91f 100644 --- a/vault/activity/test_fixtures/aug_oct.json +++ b/vault/activity/test_fixtures/aug_oct.json @@ -1,40 +1,40 @@ -{"client_id":"111122222-3333-4444-5555-000000000000","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000001","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000002","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000003","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000004","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000005","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000006","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000007","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000008","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000009","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000010","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000011","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000012","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000013","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000014","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000015","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000016","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000017","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000018","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000019","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000020","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000021","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000022","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000023","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000024","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000025","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000026","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} 
-{"client_id":"111122222-3333-4444-5555-000000000027","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000028","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000029","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000030","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000031","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000032","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000033","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000034","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000035","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000036","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000037","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000038","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000039","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000000","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000001","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000002","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000003","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000004","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000005","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000006","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000007","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000008","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000009","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} 
+{"client_id":"111122222-3333-4444-5555-000000000010","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000011","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000012","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000013","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000014","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000015","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000016","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000017","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000018","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000019","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000020","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000021","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000022","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000023","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000024","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000025","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000026","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000027","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000028","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} 
+{"client_id":"111122222-3333-4444-5555-000000000029","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000030","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000031","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000032","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000033","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000034","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000035","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000036","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000037","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000038","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000039","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} diff --git a/vault/activity/test_fixtures/aug_sep.csv b/vault/activity/test_fixtures/aug_sep.csv index 34549f8eda68..5ee2b4829739 100644 --- a/vault/activity/test_fixtures/aug_sep.csv +++ b/vault/activity/test_fixtures/aug_sep.csv @@ -1,31 +1,31 @@ -client_id,namespace_id,timestamp,non_entity,mount_accessor -111122222-3333-4444-5555-000000000000,root,1,false,auth_1 -111122222-3333-4444-5555-000000000001,root,1,false,auth_1 -111122222-3333-4444-5555-000000000002,root,1,false,auth_1 -111122222-3333-4444-5555-000000000003,root,1,false,auth_1 -111122222-3333-4444-5555-000000000004,root,1,false,auth_1 -111122222-3333-4444-5555-000000000005,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000006,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000007,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000008,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000009,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000010,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000011,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000012,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000013,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000014,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000015,root,2,false,auth_4 -111122222-3333-4444-5555-000000000016,root,2,false,auth_4 -111122222-3333-4444-5555-000000000017,root,2,false,auth_4 -111122222-3333-4444-5555-000000000018,root,2,false,auth_4 -111122222-3333-4444-5555-000000000019,root,2,false,auth_4 
-111122222-3333-4444-5555-000000000020,root,3,false,auth_5 -111122222-3333-4444-5555-000000000021,root,3,false,auth_5 -111122222-3333-4444-5555-000000000022,root,3,false,auth_5 -111122222-3333-4444-5555-000000000023,root,3,false,auth_5 -111122222-3333-4444-5555-000000000024,root,3,false,auth_5 -111122222-3333-4444-5555-000000000025,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000026,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000027,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000028,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000029,ccccc,3,false,auth_6 +client_id,client_type,namespace_id,namespace_path,mount_accessor,timestamp +111122222-3333-4444-5555-000000000000,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000001,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000002,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000003,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000004,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000005,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000006,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000007,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000008,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000009,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000010,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000011,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000012,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000013,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000014,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000015,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000016,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000017,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000018,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000019,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000020,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000021,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000022,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000023,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000024,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000025,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000026,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000027,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000028,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000029,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" diff --git a/vault/activity/test_fixtures/aug_sep.json b/vault/activity/test_fixtures/aug_sep.json index fb3a52193137..46f0e016c241 100644 --- a/vault/activity/test_fixtures/aug_sep.json +++ b/vault/activity/test_fixtures/aug_sep.json @@ -1,30 +1,30 @@ 
-{"client_id":"111122222-3333-4444-5555-000000000000","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000001","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000002","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000003","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000004","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000005","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000006","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000007","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000008","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000009","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000010","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000011","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000012","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000013","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000014","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000015","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000016","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000017","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000018","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000019","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000020","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000021","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000022","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000023","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000024","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000025","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000026","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000027","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000028","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000029","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} 
+{"client_id":"111122222-3333-4444-5555-000000000000","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000001","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000002","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000003","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000004","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000005","client_type":"entity","namespace_path":"aaaaa/","namespace_id":"aaaaa","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000006","client_type":"entity","namespace_path":"aaaaa/","namespace_id":"aaaaa","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000007","client_type":"entity","namespace_path":"aaaaa/","namespace_id":"aaaaa","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000008","client_type":"entity","namespace_path":"aaaaa/","namespace_id":"aaaaa","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000009","client_type":"entity","namespace_path":"aaaaa/","namespace_id":"aaaaa","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000010","client_type":"entity","namespace_path":"bbbbb/","namespace_id":"bbbbb","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000011","client_type":"entity","namespace_path":"bbbbb/","namespace_id":"bbbbb","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000012","client_type":"entity","namespace_path":"bbbbb/","namespace_id":"bbbbb","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000013","client_type":"entity","namespace_path":"bbbbb/","namespace_id":"bbbbb","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000014","client_type":"entity","namespace_path":"bbbbb/","namespace_id":"bbbbb","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000015","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000016","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000017","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000018","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} 
+{"client_id":"111122222-3333-4444-5555-000000000019","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000020","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000021","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000022","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000023","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000024","client_type":"entity","namespace_path":"","namespace_id":"root","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000025","client_type":"entity","namespace_path":"ccccc/","namespace_id":"ccccc","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000026","client_type":"entity","namespace_path":"ccccc/","namespace_id":"ccccc","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000027","client_type":"entity","namespace_path":"ccccc/","namespace_id":"ccccc","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000028","client_type":"entity","namespace_path":"ccccc/","namespace_id":"ccccc","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000029","client_type":"entity","namespace_path":"ccccc/","namespace_id":"ccccc","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} diff --git a/vault/activity/test_fixtures/full_history.csv b/vault/activity/test_fixtures/full_history.csv index 7a37ed236659..2cb1ba91fa77 100644 --- a/vault/activity/test_fixtures/full_history.csv +++ b/vault/activity/test_fixtures/full_history.csv @@ -1,46 +1,46 @@ -client_id,namespace_id,timestamp,non_entity,mount_accessor -111122222-3333-4444-5555-000000000040,rrrrr,0,false,auth_9 -111122222-3333-4444-5555-000000000041,rrrrr,0,false,auth_9 -111122222-3333-4444-5555-000000000042,rrrrr,0,false,auth_9 -111122222-3333-4444-5555-000000000043,rrrrr,0,false,auth_9 -111122222-3333-4444-5555-000000000044,rrrrr,0,false,auth_9 -111122222-3333-4444-5555-000000000000,root,1,false,auth_1 -111122222-3333-4444-5555-000000000001,root,1,false,auth_1 -111122222-3333-4444-5555-000000000002,root,1,false,auth_1 -111122222-3333-4444-5555-000000000003,root,1,false,auth_1 -111122222-3333-4444-5555-000000000004,root,1,false,auth_1 -111122222-3333-4444-5555-000000000005,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000006,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000007,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000008,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000009,aaaaa,1,false,auth_2 -111122222-3333-4444-5555-000000000010,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000011,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000012,bbbbb,1,false,auth_3 -111122222-3333-4444-5555-000000000013,bbbbb,2,false,auth_3 -111122222-3333-4444-5555-000000000014,bbbbb,2,false,auth_3 
-111122222-3333-4444-5555-000000000015,root,2,false,auth_4 -111122222-3333-4444-5555-000000000016,root,2,false,auth_4 -111122222-3333-4444-5555-000000000017,root,2,false,auth_4 -111122222-3333-4444-5555-000000000018,root,2,false,auth_4 -111122222-3333-4444-5555-000000000019,root,2,false,auth_4 -111122222-3333-4444-5555-000000000020,root,3,false,auth_5 -111122222-3333-4444-5555-000000000021,root,3,false,auth_5 -111122222-3333-4444-5555-000000000022,root,3,false,auth_5 -111122222-3333-4444-5555-000000000023,root,3,false,auth_5 -111122222-3333-4444-5555-000000000024,root,3,false,auth_5 -111122222-3333-4444-5555-000000000025,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000026,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000027,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000028,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000029,ccccc,3,false,auth_6 -111122222-3333-4444-5555-000000000030,root,4,false,auth_7 -111122222-3333-4444-5555-000000000031,root,4,false,auth_7 -111122222-3333-4444-5555-000000000032,root,4,false,auth_7 -111122222-3333-4444-5555-000000000033,root,4,false,auth_7 -111122222-3333-4444-5555-000000000034,root,4,false,auth_7 -111122222-3333-4444-5555-000000000035,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000036,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000037,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000038,bbbbb,4,false,auth_8 -111122222-3333-4444-5555-000000000039,bbbbb,4,false,auth_8 +client_id,client_type,namespace_id,namespace_path,mount_accessor,timestamp +111122222-3333-4444-5555-000000000040,entity,rrrrr,rrrrr/,auth_9,"1970-01-01T00:00:00Z" +111122222-3333-4444-5555-000000000041,entity,rrrrr,rrrrr/,auth_9,"1970-01-01T00:00:00Z" +111122222-3333-4444-5555-000000000042,entity,rrrrr,rrrrr/,auth_9,"1970-01-01T00:00:00Z" +111122222-3333-4444-5555-000000000043,entity,rrrrr,rrrrr/,auth_9,"1970-01-01T00:00:00Z" +111122222-3333-4444-5555-000000000044,entity,rrrrr,rrrrr/,auth_9,"1970-01-01T00:00:00Z" +111122222-3333-4444-5555-000000000000,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000001,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000002,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000003,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000004,entity,root,,auth_1,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000005,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000006,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000007,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000008,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000009,entity,aaaaa,aaaaa/,auth_2,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000010,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000011,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000012,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:01Z" +111122222-3333-4444-5555-000000000013,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000014,entity,bbbbb,bbbbb/,auth_3,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000015,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000016,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000017,entity,root,,auth_4,"1970-01-01T00:00:02Z" 
+111122222-3333-4444-5555-000000000018,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000019,entity,root,,auth_4,"1970-01-01T00:00:02Z" +111122222-3333-4444-5555-000000000020,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000021,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000022,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000023,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000024,entity,root,,auth_5,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000025,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000026,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000027,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000028,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000029,entity,ccccc,ccccc/,auth_6,"1970-01-01T00:00:03Z" +111122222-3333-4444-5555-000000000030,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000031,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000032,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000033,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000034,entity,root,,auth_7,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000035,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000036,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000037,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000038,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" +111122222-3333-4444-5555-000000000039,entity,bbbbb,bbbbb/,auth_8,"1970-01-01T00:00:04Z" diff --git a/vault/activity/test_fixtures/full_history.json b/vault/activity/test_fixtures/full_history.json index 7516adfad364..38d765395137 100644 --- a/vault/activity/test_fixtures/full_history.json +++ b/vault/activity/test_fixtures/full_history.json @@ -1,45 +1,45 @@ -{"client_id":"111122222-3333-4444-5555-000000000040","namespace_id":"rrrrr","mount_accessor":"auth_9"} -{"client_id":"111122222-3333-4444-5555-000000000041","namespace_id":"rrrrr","mount_accessor":"auth_9"} -{"client_id":"111122222-3333-4444-5555-000000000042","namespace_id":"rrrrr","mount_accessor":"auth_9"} -{"client_id":"111122222-3333-4444-5555-000000000043","namespace_id":"rrrrr","mount_accessor":"auth_9"} -{"client_id":"111122222-3333-4444-5555-000000000044","namespace_id":"rrrrr","mount_accessor":"auth_9"} -{"client_id":"111122222-3333-4444-5555-000000000000","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000001","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000002","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000003","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000004","namespace_id":"root","timestamp":1,"mount_accessor":"auth_1"} -{"client_id":"111122222-3333-4444-5555-000000000005","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000006","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} 
-{"client_id":"111122222-3333-4444-5555-000000000007","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000008","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000009","namespace_id":"aaaaa","timestamp":1,"mount_accessor":"auth_2"} -{"client_id":"111122222-3333-4444-5555-000000000010","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000011","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000012","namespace_id":"bbbbb","timestamp":1,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000013","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000014","namespace_id":"bbbbb","timestamp":2,"mount_accessor":"auth_3"} -{"client_id":"111122222-3333-4444-5555-000000000015","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000016","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000017","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000018","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000019","namespace_id":"root","timestamp":2,"mount_accessor":"auth_4"} -{"client_id":"111122222-3333-4444-5555-000000000020","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000021","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000022","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000023","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000024","namespace_id":"root","timestamp":3,"mount_accessor":"auth_5"} -{"client_id":"111122222-3333-4444-5555-000000000025","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000026","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000027","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000028","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000029","namespace_id":"ccccc","timestamp":3,"mount_accessor":"auth_6"} -{"client_id":"111122222-3333-4444-5555-000000000030","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000031","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000032","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000033","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000034","namespace_id":"root","timestamp":4,"mount_accessor":"auth_7"} -{"client_id":"111122222-3333-4444-5555-000000000035","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000036","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} 
-{"client_id":"111122222-3333-4444-5555-000000000037","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000038","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} -{"client_id":"111122222-3333-4444-5555-000000000039","namespace_id":"bbbbb","timestamp":4,"mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000040","client_type":"entity","namespace_id":"rrrrr","namespace_path":"rrrrr/","timestamp":"1970-01-01T00:00:00Z","mount_accessor":"auth_9"} +{"client_id":"111122222-3333-4444-5555-000000000041","client_type":"entity","namespace_id":"rrrrr","namespace_path":"rrrrr/","timestamp":"1970-01-01T00:00:00Z","mount_accessor":"auth_9"} +{"client_id":"111122222-3333-4444-5555-000000000042","client_type":"entity","namespace_id":"rrrrr","namespace_path":"rrrrr/","timestamp":"1970-01-01T00:00:00Z","mount_accessor":"auth_9"} +{"client_id":"111122222-3333-4444-5555-000000000043","client_type":"entity","namespace_id":"rrrrr","namespace_path":"rrrrr/","timestamp":"1970-01-01T00:00:00Z","mount_accessor":"auth_9"} +{"client_id":"111122222-3333-4444-5555-000000000044","client_type":"entity","namespace_id":"rrrrr","namespace_path":"rrrrr/","timestamp":"1970-01-01T00:00:00Z","mount_accessor":"auth_9"} +{"client_id":"111122222-3333-4444-5555-000000000000","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000001","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000002","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000003","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000004","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_1"} +{"client_id":"111122222-3333-4444-5555-000000000005","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000006","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000007","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000008","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000009","client_type":"entity","namespace_id":"aaaaa","namespace_path":"aaaaa/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_2"} +{"client_id":"111122222-3333-4444-5555-000000000010","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000011","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} 
+{"client_id":"111122222-3333-4444-5555-000000000012","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:01Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000013","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000014","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_3"} +{"client_id":"111122222-3333-4444-5555-000000000015","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000016","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000017","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000018","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000019","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:02Z","mount_accessor":"auth_4"} +{"client_id":"111122222-3333-4444-5555-000000000020","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000021","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000022","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000023","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000024","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_5"} +{"client_id":"111122222-3333-4444-5555-000000000025","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000026","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000027","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000028","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000029","client_type":"entity","namespace_id":"ccccc","namespace_path":"ccccc/","timestamp":"1970-01-01T00:00:03Z","mount_accessor":"auth_6"} +{"client_id":"111122222-3333-4444-5555-000000000030","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} 
+{"client_id":"111122222-3333-4444-5555-000000000031","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000032","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000033","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000034","client_type":"entity","namespace_id":"root","namespace_path":"","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_7"} +{"client_id":"111122222-3333-4444-5555-000000000035","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000036","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000037","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000038","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} +{"client_id":"111122222-3333-4444-5555-000000000039","client_type":"entity","namespace_id":"bbbbb","namespace_path":"bbbbb/","timestamp":"1970-01-01T00:00:04Z","mount_accessor":"auth_8"} diff --git a/vault/activity_log.go b/vault/activity_log.go index 2931a37320e1..9fd53897d92b 100644 --- a/vault/activity_log.go +++ b/vault/activity_log.go @@ -96,9 +96,23 @@ const ( // activityLogMaximumRetentionMonths sets the default maximum retention_months // to enforce when reporting is enabled. activityLogMaximumRetentionMonths = 60 + + // ActivityExportInvalidFormatPrefix is used to check validation errors for the + // activity log export API handler + ActivityExportInvalidFormatPrefix = "invalid format" ) -var ActivityClientTypes = []string{nonEntityTokenActivityType, entityActivityType, secretSyncActivityType, ACMEActivityType} +var ( + ActivityClientTypes = []string{nonEntityTokenActivityType, entityActivityType, secretSyncActivityType, ACMEActivityType} + + // ErrActivityExportInProgress is used to check validation errors for the + // activity log export API handler + ErrActivityExportInProgress = errors.New("existing export in progress") + + // ErrActivityExportNoDataInRange is used to check validation errors for the + // activity log export API handler + ErrActivityExportNoDataInRange = errors.New("no data to export in provided time range") +) type segmentInfo struct { startTimestamp int64 @@ -143,7 +157,8 @@ type ActivityLog struct { // nodeID is the ID to use for all fragments that // are generated. - // TODO: use secondary ID when available? + // This uses the primary ID as of right now, but + // could be adapted to use a secondary in the future. nodeID string // current log fragment (may be nil) @@ -187,14 +202,6 @@ type ActivityLog struct { inprocessExport *atomic.Bool - // CensusReportDone is a channel used to signal tests upon successful calls - // to (CensusReporter).Write() in CensusReport. - CensusReportDone chan bool - - // CensusReportInterval is the testing configuration for time between - // Write() calls initiated in CensusReport. 
- CensusReportInterval time.Duration - // clock is used to support manipulating time in unit and integration tests clock timeutil.Clock // precomputedQueryWritten receives an element whenever a precomputed query @@ -214,9 +221,6 @@ type ActivityLogCoreConfig struct { // Do not start timers to send or persist fragments. DisableTimers bool - // CensusReportInterval is the testing configuration for time - CensusReportInterval time.Duration - // MinimumRetentionMonths defines the minimum value for retention MinimumRetentionMonths int @@ -227,6 +231,24 @@ type ActivityLogCoreConfig struct { DisableInvalidation bool } +// ActivityLogExportRecord is the output structure for activity export +// API records. The omitempty JSON tag is not used to ensure that the +// fields are consistent between CSV and JSON output. +type ActivityLogExportRecord struct { + ClientID string `json:"client_id" mapstructure:"client_id"` + NamespaceID string `json:"namespace_id" mapstructure:"namespace_id"` + NamespacePath string `json:"namespace_path" mapstructure:"namespace_path"` + Timestamp string `json:"timestamp" mapstructure:"timestamp"` + + // MountAccessor is the auth mount accessor of the token used to perform the + // activity. + MountAccessor string `json:"mount_accessor" mapstructure:"mount_accessor"` + + // ClientType identifies the source of the entity record (entity, + // non-entity, acme, etc.) + ClientType string `json:"client_type" mapstructure:"client_type"` +} + // NewActivityLog creates an activity log. func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics metricsutil.Metrics) (*ActivityLog, error) { hostname, err := os.Hostname() @@ -249,7 +271,6 @@ func NewActivityLog(core *Core, logger log.Logger, view *BarrierView, metrics me sendCh: make(chan struct{}, 1), // buffered so it can be triggered by fragment size doneCh: make(chan struct{}, 1), partialMonthClientTracker: make(map[string]*activity.EntityRecord), - CensusReportInterval: time.Hour * 1, clock: clock, currentSegment: segmentInfo{ startTimestamp: 0, @@ -998,7 +1019,7 @@ func (a *ActivityLog) refreshFromStoredLog(ctx context.Context, wg *sync.WaitGro return nil } -// This version is used during construction +// SetConfigInit is used during construction func (a *ActivityLog) SetConfigInit(config activityConfig) { switch config.Enabled { case "enable": @@ -1020,13 +1041,9 @@ func (a *ActivityLog) SetConfigInit(config activityConfig) { if a.configOverrides.MinimumRetentionMonths > 0 { a.retentionMonths = a.configOverrides.MinimumRetentionMonths } - - if a.configOverrides.CensusReportInterval > 0 { - a.CensusReportInterval = a.configOverrides.CensusReportInterval - } } -// This version reacts to user changes +// SetConfig reacts to user changes func (a *ActivityLog) SetConfig(ctx context.Context, config activityConfig) { a.l.Lock() defer a.l.Unlock() @@ -1685,9 +1702,7 @@ func (a *ActivityLog) receivedFragment(fragment *activity.LogFragment) { } type ResponseCounts struct { - DistinctEntities int `json:"distinct_entities" mapstructure:"distinct_entities"` EntityClients int `json:"entity_clients" mapstructure:"entity_clients"` - NonEntityTokens int `json:"non_entity_tokens" mapstructure:"non_entity_tokens"` NonEntityClients int `json:"non_entity_clients" mapstructure:"non_entity_clients"` Clients int `json:"clients"` SecretSyncs int `json:"secret_syncs" mapstructure:"secret_syncs"` @@ -1701,9 +1716,7 @@ func (r *ResponseCounts) Add(newRecord *ResponseCounts) { } r.EntityClients += newRecord.EntityClients r.Clients += 
newRecord.Clients - r.DistinctEntities += newRecord.DistinctEntities r.NonEntityClients += newRecord.NonEntityClients - r.NonEntityTokens += newRecord.NonEntityTokens r.ACMEClients += newRecord.ACMEClients r.SecretSyncs += newRecord.SecretSyncs } @@ -1715,30 +1728,6 @@ type ResponseNamespace struct { Mounts []*ResponseMount `json:"mounts"` } -// Add adds the namespace counts to the existing record, then either adds the -// mount counts to the existing mount (if it exists) or appends the mount to the -// list of mounts -func (r *ResponseNamespace) Add(newRecord *ResponseNamespace) { - // Create a map of the existing mounts, so we don't duplicate them - mountMap := make(map[string]*ResponseCounts) - for _, erm := range r.Mounts { - mountMap[erm.MountPath] = erm.Counts - } - - r.Counts.Add(&newRecord.Counts) - - // Check the current month mounts against the existing mounts and if there are matches, update counts - // accordingly. If there is no match, append the new mount to the existing mounts, so it will be counted - // later. - for _, newRecordMount := range newRecord.Mounts { - if existingRecordMountCounts, ok := mountMap[newRecordMount.MountPath]; ok { - existingRecordMountCounts.Add(newRecordMount.Counts) - } else { - r.Mounts = append(r.Mounts, newRecordMount) - } - } -} - type ResponseMonth struct { Timestamp string `json:"timestamp"` Counts *ResponseCounts `json:"counts"` @@ -1948,8 +1937,6 @@ type activityConfig struct { // Enabled is one of enable, disable, default. Enabled string `json:"enabled"` - - CensusReportInterval time.Duration `json:"census_report_interval"` } func defaultActivityConfig() activityConfig { @@ -2747,7 +2734,7 @@ func (a *ActivityLog) calculateByNamespaceResponseForQuery(ctx context.Context, for _, mountRecord := range nsRecord.Mounts { mountResponse = append(mountResponse, &ResponseMount{ MountPath: mountRecord.MountPath, - Counts: a.countsRecordToCountsResponse(mountRecord.Counts, true), + Counts: a.countsRecordToCountsResponse(mountRecord.Counts), }) } // Sort the mounts in descending order of usage @@ -2779,11 +2766,11 @@ func (a *ActivityLog) prepareMonthsResponseForQuery(ctx context.Context, byMonth for _, monthsRecord := range byMonth { newClientsResponse := &ResponseNewClients{} if monthsRecord.NewClients.Counts.HasCounts() { - newClientsNSResponse, err := a.prepareNamespaceResponse(ctx, monthsRecord.NewClients.Namespaces) + newClientsTotal, newClientsNSResponse, err := a.prepareNamespaceResponse(ctx, monthsRecord.NewClients.Namespaces) if err != nil { return nil, err } - newClientsResponse.Counts = a.countsRecordToCountsResponse(monthsRecord.NewClients.Counts, false) + newClientsResponse.Counts = newClientsTotal newClientsResponse.Namespaces = newClientsNSResponse } @@ -2791,11 +2778,11 @@ func (a *ActivityLog) prepareMonthsResponseForQuery(ctx context.Context, byMonth Timestamp: time.Unix(monthsRecord.Timestamp, 0).UTC().Format(time.RFC3339), } if monthsRecord.Counts.HasCounts() { - nsResponse, err := a.prepareNamespaceResponse(ctx, monthsRecord.Namespaces) + monthTotal, nsResponse, err := a.prepareNamespaceResponse(ctx, monthsRecord.Namespaces) if err != nil { return nil, err } - monthResponse.Counts = a.countsRecordToCountsResponse(monthsRecord.Counts, false) + monthResponse.Counts = monthTotal monthResponse.Namespaces = nsResponse monthResponse.NewClients = newClientsResponse months = append(months, monthResponse) @@ -2804,14 +2791,16 @@ func (a *ActivityLog) prepareMonthsResponseForQuery(ctx context.Context, byMonth return months, nil } -// 
prepareNamespaceResponse populates the namespace portion of the activity log response struct -// from -func (a *ActivityLog) prepareNamespaceResponse(ctx context.Context, nsRecords []*activity.MonthlyNamespaceRecord) ([]*ResponseNamespace, error) { +// prepareNamespaceResponse takes monthly namespace records and converts them +// into the response namespace format. The function also returns counts for the +// total number of clients per type seen that month. +func (a *ActivityLog) prepareNamespaceResponse(ctx context.Context, nsRecords []*activity.MonthlyNamespaceRecord) (*ResponseCounts, []*ResponseNamespace, error) { queryNS, err := namespace.FromContext(ctx) if err != nil { - return nil, err + return nil, nil, err } - nsResponse := make([]*ResponseNamespace, 0, len(nsRecords)) + totalCounts := &ResponseCounts{} + nsResponses := make([]*ResponseNamespace, 0, len(nsRecords)) for _, nsRecord := range nsRecords { if !nsRecord.Counts.HasCounts() { continue @@ -2819,7 +2808,7 @@ func (a *ActivityLog) prepareNamespaceResponse(ctx context.Context, nsRecords [] ns, err := NamespaceByID(ctx, nsRecord.NamespaceID, a.core) if err != nil { - return nil, err + return nil, nil, err } if a.includeInResponse(queryNS, ns) { mountResponse := make([]*ResponseMount, 0, len(nsRecord.Mounts)) @@ -2830,7 +2819,7 @@ func (a *ActivityLog) prepareNamespaceResponse(ctx context.Context, nsRecords [] mountResponse = append(mountResponse, &ResponseMount{ MountPath: mountRecord.MountPath, - Counts: a.countsRecordToCountsResponse(mountRecord.Counts, false), + Counts: a.countsRecordToCountsResponse(mountRecord.Counts), }) } @@ -2840,15 +2829,18 @@ func (a *ActivityLog) prepareNamespaceResponse(ctx context.Context, nsRecords [] } else { displayPath = ns.Path } - nsResponse = append(nsResponse, &ResponseNamespace{ + nsResponse := &ResponseNamespace{ NamespaceID: nsRecord.NamespaceID, NamespacePath: displayPath, - Counts: *a.countsRecordToCountsResponse(nsRecord.Counts, false), + Counts: *a.countsRecordToCountsResponse(nsRecord.Counts), Mounts: mountResponse, - }) + } + nsResponses = append(nsResponses, nsResponse) + + totalCounts.Add(&nsResponse.Counts) } } - return nsResponse, nil + return totalCounts, nsResponses, nil } // partialMonthClientCount returns the number of clients used so far this month. @@ -2884,9 +2876,7 @@ func (a *ActivityLog) partialMonthClientCount(ctx context.Context) (map[string]i // Now populate the response based on breakdowns. responseData := make(map[string]interface{}) responseData["by_namespace"] = byNamespaceResponse - responseData["distinct_entities"] = totalCounts.EntityClients responseData["entity_clients"] = totalCounts.EntityClients - responseData["non_entity_tokens"] = totalCounts.NonEntityClients responseData["non_entity_clients"] = totalCounts.NonEntityClients responseData["clients"] = totalCounts.Clients responseData["secret_syncs"] = totalCounts.SecretSyncs @@ -2923,10 +2913,9 @@ func (a *ActivityLog) partialMonthClientCount(ctx context.Context) (map[string]i } func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, format string, startTime, endTime time.Time) error { - // For capacity reasons only allow a single in-process export at a time. - // TODO do we really need to do this? 
+ // Only allow a single in-process export at a time as they can be resource-intensive if !a.inprocessExport.CAS(false, true) { - return fmt.Errorf("existing export in progress") + return ErrActivityExportInProgress } defer a.inprocessExport.Store(false) @@ -2953,7 +2942,7 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f } if len(filteredList) == 0 { a.logger.Info("no data to export", "start_time", startTime, "end_time", endTime) - return fmt.Errorf("no data to export in provided time range") + return ErrActivityExportNoDataInRange } actualStartTime := filteredList[len(filteredList)-1] @@ -2962,14 +2951,16 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f // Add headers here because we start to immediately write in the csv encoder // constructor. rw.Header().Add("Content-Disposition", fmt.Sprintf("attachment; filename=\"activity_export_%d_to_%d.%s\"", actualStartTime.Unix(), endTime.Unix(), format)) - rw.Header().Add("Content-Type", fmt.Sprintf("application/%s", format)) var encoder encoder switch format { case "json": + rw.Header().Add("Content-Type", fmt.Sprintf("application/json")) encoder = newJSONEncoder(rw) case "csv": var err error + rw.Header().Add("Content-Type", fmt.Sprintf("text/csv")) + encoder, err = newCSVEncoder(rw) if err != nil { return fmt.Errorf("failed to create csv encoder: %w", err) @@ -2981,6 +2972,10 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f a.logger.Info("starting activity log export", "start_time", startTime, "end_time", endTime, "format", format) dedupedIds := make(map[string]struct{}) + reqNS, err := namespace.FromContext(ctx) + if err != nil { + return err + } walkEntities := func(l *activity.EntityActivityLog, startTime time.Time, hll *hyperloglog.Sketch) error { for _, e := range l.Clients { @@ -2989,17 +2984,35 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f } dedupedIds[e.ClientID] = struct{}{} - err := encoder.Encode(e) + + ns, err := NamespaceByID(ctx, e.NamespaceID, a.core) if err != nil { return err } + + if a.includeInResponse(reqNS, ns) { + ts := time.Unix(e.Timestamp, 0) + + record := &ActivityLogExportRecord{ + ClientID: e.ClientID, + NamespaceID: ns.ID, + NamespacePath: ns.Path, + Timestamp: ts.UTC().Format(time.RFC3339), + MountAccessor: e.MountAccessor, + ClientType: e.ClientType, + } + + err := encoder.Encode(record) + if err != nil { + return err + } + } } return nil } // For each month in the filtered list walk all the log segments - for _, startTime := range filteredList { err := a.WalkEntitySegments(ctx, startTime, nil, walkEntities) if err != nil { @@ -3020,7 +3033,7 @@ func (a *ActivityLog) writeExport(ctx context.Context, rw http.ResponseWriter, f } type encoder interface { - Encode(*activity.EntityRecord) error + Encode(*ActivityLogExportRecord) error Flush() Error() error } @@ -3037,7 +3050,7 @@ func newJSONEncoder(w io.Writer) *jsonEncoder { } } -func (j *jsonEncoder) Encode(er *activity.EntityRecord) error { +func (j *jsonEncoder) Encode(er *ActivityLogExportRecord) error { return j.e.Encode(er) } @@ -3051,22 +3064,12 @@ var _ encoder = (*csvEncoder)(nil) type csvEncoder struct { *csv.Writer + wroteHeader bool } func newCSVEncoder(w io.Writer) (*csvEncoder, error) { writer := csv.NewWriter(w) - err := writer.Write([]string{ - "client_id", - "namespace_id", - "timestamp", - "non_entity", - "mount_accessor", - }) - if err != nil { - return nil, err - } - return &csvEncoder{ Writer: writer, }, 
nil @@ -3074,12 +3077,30 @@ func newCSVEncoder(w io.Writer) (*csvEncoder, error) { // Encode converts an export bundle into a set of strings and writes them to the // csv writer. -func (c *csvEncoder) Encode(e *activity.EntityRecord) error { +func (c *csvEncoder) Encode(record *ActivityLogExportRecord) error { + if !c.wroteHeader { + + err := c.Writer.Write([]string{ + "client_id", + "client_type", + "namespace_id", + "namespace_path", + "mount_accessor", + "timestamp", + }) + if err != nil { + return err + } + + c.wroteHeader = true + } + return c.Writer.Write([]string{ - e.ClientID, - e.NamespaceID, - fmt.Sprintf("%d", e.Timestamp), - fmt.Sprintf("%t", e.NonEntity), - e.MountAccessor, + record.ClientID, + record.ClientType, + record.NamespaceID, + record.NamespacePath, + record.MountAccessor, + record.Timestamp, }) } diff --git a/vault/activity_log_test.go b/vault/activity_log_test.go index 620ce6f63493..2871e733c2a2 100644 --- a/vault/activity_log_test.go +++ b/vault/activity_log_test.go @@ -10,8 +10,6 @@ import ( "errors" "fmt" "net/http" - "os" - "path/filepath" "reflect" "sort" "strconv" @@ -1903,182 +1901,6 @@ func TestActivityLog_refreshFromStoredLogPreviousMonth(t *testing.T) { } } -// TestActivityLog_Export writes overlapping client for 5 months with various mounts and namespaces. It performs an -// export for various month ranges in the range, and verifies that the outputs are correct. -func TestActivityLog_Export(t *testing.T) { - timeutil.SkipAtEndOfMonth(t) - - january := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) - august := time.Date(2020, 8, 15, 12, 0, 0, 0, time.UTC) - september := timeutil.StartOfMonth(time.Date(2020, 9, 1, 0, 0, 0, 0, time.UTC)) - october := timeutil.StartOfMonth(time.Date(2020, 10, 1, 0, 0, 0, 0, time.UTC)) - november := timeutil.StartOfMonth(time.Date(2020, 11, 1, 0, 0, 0, 0, time.UTC)) - - core, _, _ := TestCoreUnsealedWithConfig(t, &CoreConfig{ - ActivityLogConfig: ActivityLogCoreConfig{ - DisableTimers: true, - ForceEnable: true, - }, - }) - a := core.activityLog - ctx := namespace.RootContext(nil) - - // Generate overlapping sets of entity IDs from this list. 
- // january: 40-44 RRRRR - // first month: 0-19 RRRRRAAAAABBBBBRRRRR - // second month: 10-29 BBBBBRRRRRRRRRRCCCCC - // third month: 15-39 RRRRRRRRRRCCCCCRRRRRBBBBB - - entityRecords := make([]*activity.EntityRecord, 45) - entityNamespaces := []string{"root", "aaaaa", "bbbbb", "root", "root", "ccccc", "root", "bbbbb", "rrrrr"} - authMethods := []string{"auth_1", "auth_2", "auth_3", "auth_4", "auth_5", "auth_6", "auth_7", "auth_8", "auth_9"} - - for i := range entityRecords { - entityRecords[i] = &activity.EntityRecord{ - ClientID: fmt.Sprintf("111122222-3333-4444-5555-%012v", i), - NamespaceID: entityNamespaces[i/5], - MountAccessor: authMethods[i/5], - } - } - - toInsert := []struct { - StartTime int64 - Segment uint64 - Clients []*activity.EntityRecord - }{ - // January, should not be included - { - january.Unix(), - 0, - entityRecords[40:45], - }, - // Artifically split August and October - { // 1 - august.Unix(), - 0, - entityRecords[:13], - }, - { // 2 - august.Unix(), - 1, - entityRecords[13:20], - }, - { // 3 - september.Unix(), - 0, - entityRecords[10:30], - }, - { // 4 - october.Unix(), - 0, - entityRecords[15:40], - }, - { - october.Unix(), - 1, - entityRecords[15:40], - }, - { - october.Unix(), - 2, - entityRecords[17:23], - }, - } - - for i, segment := range toInsert { - eal := &activity.EntityActivityLog{ - Clients: segment.Clients, - } - - // Mimic a lower time stamp for earlier clients - for _, c := range eal.Clients { - c.Timestamp = int64(i) - } - - data, err := proto.Marshal(eal) - if err != nil { - t.Fatal(err) - } - path := fmt.Sprintf("%ventity/%v/%v", ActivityLogPrefix, segment.StartTime, segment.Segment) - WriteToStorage(t, core, path, data) - } - - tCases := []struct { - format string - startTime time.Time - endTime time.Time - expected string - }{ - { - format: "json", - startTime: august, - endTime: timeutil.EndOfMonth(september), - expected: "aug_sep.json", - }, - { - format: "csv", - startTime: august, - endTime: timeutil.EndOfMonth(september), - expected: "aug_sep.csv", - }, - { - format: "json", - startTime: january, - endTime: timeutil.EndOfMonth(november), - expected: "full_history.json", - }, - { - format: "csv", - startTime: january, - endTime: timeutil.EndOfMonth(november), - expected: "full_history.csv", - }, - { - format: "json", - startTime: august, - endTime: timeutil.EndOfMonth(october), - expected: "aug_oct.json", - }, - { - format: "csv", - startTime: august, - endTime: timeutil.EndOfMonth(october), - expected: "aug_oct.csv", - }, - { - format: "json", - startTime: august, - endTime: timeutil.EndOfMonth(august), - expected: "aug.json", - }, - { - format: "csv", - startTime: august, - endTime: timeutil.EndOfMonth(august), - expected: "aug.csv", - }, - } - - for _, tCase := range tCases { - rw := &fakeResponseWriter{ - buffer: &bytes.Buffer{}, - headers: http.Header{}, - } - if err := a.writeExport(ctx, rw, tCase.format, tCase.startTime, tCase.endTime); err != nil { - t.Fatal(err) - } - - expected, err := os.ReadFile(filepath.Join("activity", "test_fixtures", tCase.expected)) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(rw.buffer.Bytes(), expected) { - t.Fatal(rw.buffer.String()) - } - } -} - type fakeResponseWriter struct { buffer *bytes.Buffer headers http.Header @@ -3990,8 +3812,8 @@ func TestActivityLog_partialMonthClientCount(t *testing.T) { } for _, clientCount := range clientCountResponse { - if int(clientCounts[clientCount.NamespaceID]) != clientCount.Counts.DistinctEntities { - t.Errorf("bad entity count for namespace %s . 
expected %d, got %d", clientCount.NamespaceID, int(clientCounts[clientCount.NamespaceID]), clientCount.Counts.DistinctEntities) + if int(clientCounts[clientCount.NamespaceID]) != clientCount.Counts.EntityClients { + t.Errorf("bad entity count for namespace %s . expected %d, got %d", clientCount.NamespaceID, int(clientCounts[clientCount.NamespaceID]), clientCount.Counts.EntityClients) } totalCount := int(clientCounts[clientCount.NamespaceID]) if totalCount != clientCount.Counts.Clients { @@ -3999,12 +3821,12 @@ func TestActivityLog_partialMonthClientCount(t *testing.T) { } } - distinctEntities, ok := results["distinct_entities"] + entityClients, ok := results["entity_clients"] if !ok { t.Fatalf("malformed results. got %v", results) } - if distinctEntities != len(clients) { - t.Errorf("bad entity count. expected %d, got %d", len(clients), distinctEntities) + if entityClients != len(clients) { + t.Errorf("bad entity count. expected %d, got %d", len(clients), entityClients) } clientCount, ok := results["clients"] @@ -4067,8 +3889,8 @@ func TestActivityLog_partialMonthClientCountUsingHandleQuery(t *testing.T) { } for _, clientCount := range clientCountResponse { - if int(clientCounts[clientCount.NamespaceID]) != clientCount.Counts.DistinctEntities { - t.Errorf("bad entity count for namespace %s . expected %d, got %d", clientCount.NamespaceID, int(clientCounts[clientCount.NamespaceID]), clientCount.Counts.DistinctEntities) + if int(clientCounts[clientCount.NamespaceID]) != clientCount.Counts.EntityClients { + t.Errorf("bad entity count for namespace %s . expected %d, got %d", clientCount.NamespaceID, int(clientCounts[clientCount.NamespaceID]), clientCount.Counts.EntityClients) } totalCount := int(clientCounts[clientCount.NamespaceID]) if totalCount != clientCount.Counts.Clients { @@ -4082,9 +3904,9 @@ func TestActivityLog_partialMonthClientCountUsingHandleQuery(t *testing.T) { } totalCounts := ResponseCounts{} err = mapstructure.Decode(totals, &totalCounts) - distinctEntities := totalCounts.DistinctEntities - if distinctEntities != len(clients) { - t.Errorf("bad entity count. expected %d, got %d", len(clients), distinctEntities) + entityClients := totalCounts.EntityClients + if entityClients != len(clients) { + t.Errorf("bad entity count. expected %d, got %d", len(clients), entityClients) } clientCount := totalCounts.Clients @@ -4112,30 +3934,20 @@ func TestActivityLog_partialMonthClientCountUsingHandleQuery(t *testing.T) { if monthsResponse[0].Counts.NonEntityClients != totalCounts.NonEntityClients { t.Fatalf("wrong non-entity client count. got %v, expected %v", monthsResponse[0].Counts.NonEntityClients, totalCounts.NonEntityClients) } - if monthsResponse[0].Counts.NonEntityTokens != totalCounts.NonEntityTokens { - t.Fatalf("wrong non-entity client count. got %v, expected %v", monthsResponse[0].Counts.NonEntityTokens, totalCounts.NonEntityTokens) - } if monthsResponse[0].Counts.Clients != monthsResponse[0].NewClients.Counts.Clients { t.Fatalf("wrong client count. got %v, expected %v", monthsResponse[0].Counts.Clients, monthsResponse[0].NewClients.Counts.Clients) } - if monthsResponse[0].Counts.DistinctEntities != monthsResponse[0].NewClients.Counts.DistinctEntities { - t.Fatalf("wrong distinct entities count. got %v, expected %v", monthsResponse[0].Counts.DistinctEntities, monthsResponse[0].NewClients.Counts.DistinctEntities) - } if monthsResponse[0].Counts.EntityClients != monthsResponse[0].NewClients.Counts.EntityClients { t.Fatalf("wrong entity client count. 
got %v, expected %v", monthsResponse[0].Counts.EntityClients, monthsResponse[0].NewClients.Counts.EntityClients) } if monthsResponse[0].Counts.NonEntityClients != monthsResponse[0].NewClients.Counts.NonEntityClients { t.Fatalf("wrong non-entity client count. got %v, expected %v", monthsResponse[0].Counts.NonEntityClients, monthsResponse[0].NewClients.Counts.NonEntityClients) } - if monthsResponse[0].Counts.NonEntityTokens != monthsResponse[0].NewClients.Counts.NonEntityTokens { - t.Fatalf("wrong non-entity token count. got %v, expected %v", monthsResponse[0].Counts.NonEntityTokens, monthsResponse[0].NewClients.Counts.NonEntityTokens) - } - namespaceResponseMonth := monthsResponse[0].Namespaces for _, clientCount := range namespaceResponseMonth { if int(clientCounts[clientCount.NamespaceID]) != clientCount.Counts.EntityClients { - t.Errorf("bad entity count for namespace %s . expected %d, got %d", clientCount.NamespaceID, int(clientCounts[clientCount.NamespaceID]), clientCount.Counts.DistinctEntities) + t.Errorf("bad entity count for namespace %s . expected %d, got %d", clientCount.NamespaceID, int(clientCounts[clientCount.NamespaceID]), clientCount.Counts.EntityClients) } totalCount := int(clientCounts[clientCount.NamespaceID]) if totalCount != clientCount.Counts.Clients { diff --git a/vault/activity_log_util_common.go b/vault/activity_log_util_common.go index ff8923a9580c..dd4314d01366 100644 --- a/vault/activity_log_util_common.go +++ b/vault/activity_log_util_common.go @@ -458,9 +458,8 @@ func (e *segmentReader) ReadEntity(ctx context.Context) (*activity.EntityActivit // namespaceRecordToCountsResponse converts the record to the ResponseCounts // type. The function sums entity, non-entity, and secret sync counts to get the -// total client count. If includeDeprecated is true, the deprecated fields -// NonEntityTokens and DistinctEntities are populated -func (a *ActivityLog) countsRecordToCountsResponse(record *activity.CountsRecord, includeDeprecated bool) *ResponseCounts { +// total client count. +func (a *ActivityLog) countsRecordToCountsResponse(record *activity.CountsRecord) *ResponseCounts { response := &ResponseCounts{ EntityClients: record.EntityClients, NonEntityClients: record.NonEntityClients, @@ -468,10 +467,6 @@ func (a *ActivityLog) countsRecordToCountsResponse(record *activity.CountsRecord SecretSyncs: record.SecretSyncs, ACMEClients: record.ACMEClients, } - if includeDeprecated { - response.NonEntityTokens = response.NonEntityClients - response.DistinctEntities = response.EntityClients - } return response } @@ -480,9 +475,7 @@ func (a *ActivityLog) countsRecordToCountsResponse(record *activity.CountsRecord // counts to get the total client count. 
func (a *ActivityLog) namespaceRecordToCountsResponse(record *activity.NamespaceRecord) *ResponseCounts { return &ResponseCounts{ - DistinctEntities: int(record.Entities), EntityClients: int(record.Entities), - NonEntityTokens: int(record.NonEntityTokens), NonEntityClients: int(record.NonEntityTokens), Clients: int(record.Entities + record.NonEntityTokens + record.SecretSyncs + record.ACMEClients), SecretSyncs: int(record.SecretSyncs), diff --git a/vault/audit.go b/vault/audit.go index 1487d3007ff3..9d3c57c65f73 100644 --- a/vault/audit.go +++ b/vault/audit.go @@ -160,7 +160,6 @@ func (c *Core) enableAudit(ctx context.Context, entry *MountEntry, updateStorage if err != nil { c.logger.Error("new audit backend failed test", "path", entry.Path, "type", entry.Type, "error", err) return fmt.Errorf("audit backend failed test message: %w", err) - } } diff --git a/vault/audit_test.go b/vault/audit_test.go index 1c71edb0833a..23d39723fb09 100644 --- a/vault/audit_test.go +++ b/vault/audit_test.go @@ -12,7 +12,6 @@ import ( "testing" "time" - "github.com/hashicorp/errwrap" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-uuid" "github.com/hashicorp/vault/audit" @@ -442,9 +441,9 @@ func TestAuditBroker_LogRequest(t *testing.T) { t.Fatalf("err: %v", err) } - // Should FAIL work with both failing backends + // Should FAIL when both backends fail a2.ReqErr = fmt.Errorf("failed") - if err := b.LogRequest(ctx, logInput); !errwrap.Contains(err, "event not processed by enough 'sink' nodes") { + if err = b.LogRequest(ctx, logInput); err != nil && !strings.HasPrefix(err.Error(), "event not processed by enough 'sink' nodes") { t.Fatalf("err: %v", err) } } @@ -630,8 +629,7 @@ func TestAuditBroker_AuditHeaders(t *testing.T) { // Should FAIL work with both failing backends a2.ReqErr = fmt.Errorf("failed") - err = b.LogRequest(ctx, logInput) - if !errwrap.Contains(err, "event not processed by enough 'sink' nodes") { + if err = b.LogRequest(ctx, logInput); err != nil && !strings.HasPrefix(err.Error(), "event not processed by enough 'sink' nodes") { t.Fatalf("err: %v", err) } } diff --git a/vault/census.go b/vault/census.go index fc1cf2b10851..3481adacd22e 100644 --- a/vault/census.go +++ b/vault/census.go @@ -5,12 +5,16 @@ package vault -import "time" +import ( + "context" + "time" +) + +const utilizationBasePath = "utilization" // CensusAgent is a stub for OSS type CensusReporter interface{} -func (c *Core) setupCensusManager() error { return nil } func (c *Core) BillingStart() time.Time { return time.Time{} } func (c *Core) AutomatedLicenseReportingEnabled() bool { return false } func (c *Core) CensusAgent() CensusReporter { return nil } @@ -19,3 +23,9 @@ func (c *Core) StartManualCensusSnapshots() {} func (c *Core) ManualLicenseReportingEnabled() bool { return false } func (c *Core) ManualCensusSnapshotInterval() time.Duration { return time.Duration(0) } func (c *Core) ManualCensusSnapshotRetentionTime() time.Duration { return time.Duration(0) } +func (c *Core) StartCensusReports(ctx context.Context) {} +func (c *Core) SetRetentionMonths(months int) error { return nil } +func (c *Core) ReloadCensusManager(licenseChange bool) error { return nil } +func (c *Core) parseCensusManagerConfig(conf *CoreConfig) (CensusManagerConfig, error) { + return CensusManagerConfig{}, nil +} diff --git a/vault/census_manager.go b/vault/census_manager.go new file mode 100644 index 000000000000..0bf8977b9258 --- /dev/null +++ b/vault/census_manager.go @@ -0,0 +1,45 @@ +// Copyright (c) HashiCorp, Inc. 
+// SPDX-License-Identifier: BUSL-1.1 + +//go:build !enterprise + +package vault + +import ( + "context" + "time" + + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/vault/sdk/logical" +) + +// CensusManager provides stub behavior for CE, simplifying the logic between CE +// and ENT. This will always be marked active: false. +type CensusManager struct { + active bool + logger hclog.Logger +} + +// CensusManagerConfig is empty on CE. +type CensusManagerConfig struct{} + +// NewCensusManager sets up the stub CensusManager on CE with active: false. +func NewCensusManager(logger hclog.Logger, conf CensusManagerConfig, storage logical.Storage) (*CensusManager, error) { + return &CensusManager{ + active: false, + logger: logger, + }, nil +} + +// setupCensusManager is a stub on CE. +func (c *Core) setupCensusManager(ctx context.Context) error { + return nil +} + +// BillingStart is a stub on CE. +func (cm *CensusManager) BillingStart() time.Time { + return time.Time{} +} + +// StartManualReportingSnapshots is a stub for CE. +func (cm *CensusManager) StartManualReportingSnapshots() {} diff --git a/vault/census_stubs_oss.go b/vault/census_stubs_oss.go deleted file mode 100644 index f52717a5c91e..000000000000 --- a/vault/census_stubs_oss.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !enterprise - -package vault - -import "context" - -//go:generate go run github.com/hashicorp/vault/tools/stubmaker - -func (c *Core) StartCensusReports(ctx context.Context) {} -func (c *Core) SetRetentionMonths(months int) error { return nil } -func (c *Core) ReloadCensusManager(licenseChange bool) error { return nil } diff --git a/vault/core.go b/vault/core.go index 4e9518cc1518..3598b5e9f830 100644 --- a/vault/core.go +++ b/vault/core.go @@ -727,6 +727,8 @@ type Core struct { periodicLeaderRefreshInterval time.Duration clusterAddrBridge *raft.ClusterAddrBridge + + censusManager *CensusManager } func (c *Core) ActiveNodeClockSkewMillis() int64 { @@ -1316,6 +1318,19 @@ func NewCore(conf *CoreConfig) (*Core, error) { c.versionHistory = make(map[string]VaultVersion) } + // Setup the Census Manager + cmConfig, err := c.parseCensusManagerConfig(conf) + if err != nil { + return nil, err + } + + cmLogger := conf.Logger.Named("reporting") + c.allLoggers = append(c.allLoggers, cmLogger) + c.censusManager, err = NewCensusManager(cmLogger, cmConfig, NewBarrierView(c.barrier, utilizationBasePath)) + if err != nil { + return nil, err + } + // Events eventsLogger := conf.Logger.Named("events") c.allLoggers = append(c.allLoggers, eventsLogger) @@ -2449,15 +2464,12 @@ func (s standardUnsealStrategy) unseal(ctx context.Context, logger log.Logger, c return err } - if err := c.setupCensusManager(); err != nil { - logger.Error("failed to instantiate the license reporting agent", "error", err) + if err := c.setupCensusManager(ctx); err != nil { + return err } - - c.StartCensusReports(ctx) - c.StartManualCensusSnapshots() - } else { - broker, err := audit.NewBroker(logger) + brokerLogger := logger.Named("audit") + broker, err := audit.NewBroker(brokerLogger) if err != nil { return err } diff --git a/vault/eventbus/bus.go b/vault/eventbus/bus.go index b45839f1ac0b..8f8b174a5245 100644 --- a/vault/eventbus/bus.go +++ b/vault/eventbus/bus.go @@ -237,6 +237,9 @@ func (bus *EventBus) subscribeInternal(ctx context.Context, namespacePathPattern var filterNode *eventlogger.Filter if cluster != nil { filterNode, err = newClusterFilterNode(bus.filters, 
clusterID(*cluster)) + if err != nil { + return nil, nil, err + } } else { filterNode, err = newFilterNode(namespacePathPatterns, pattern, bexprFilter) if err != nil { diff --git a/vault/external_tests/activity_testonly/acme_regeneration_test.go b/vault/external_tests/activity_testonly/acme_regeneration_test.go index 26ea54ce9c31..dbd8355f81a5 100644 --- a/vault/external_tests/activity_testonly/acme_regeneration_test.go +++ b/vault/external_tests/activity_testonly/acme_regeneration_test.go @@ -89,7 +89,6 @@ func TestACMERegeneration_RegenerateWithCurrentMonth(t *testing.T) { }) require.NoError(t, err) require.Equal(t, vault.ResponseCounts{ - NonEntityTokens: 26, NonEntityClients: 26, Clients: 43, ACMEClients: 17, @@ -102,7 +101,6 @@ func TestACMERegeneration_RegenerateWithCurrentMonth(t *testing.T) { }) require.NoError(t, err) require.Equal(t, vault.ResponseCounts{ - NonEntityTokens: 36, NonEntityClients: 36, Clients: 73, ACMEClients: 37, @@ -146,7 +144,6 @@ func TestACMERegeneration_RegenerateMuchOlder(t *testing.T) { }) require.NoError(t, err) require.Equal(t, vault.ResponseCounts{ - NonEntityTokens: 26, NonEntityClients: 26, Clients: 43, ACMEClients: 17, @@ -191,7 +188,6 @@ func TestACMERegeneration_RegeneratePreviousMonths(t *testing.T) { }) require.NoError(t, err) require.Equal(t, vault.ResponseCounts{ - NonEntityTokens: 26, NonEntityClients: 26, Clients: 43, ACMEClients: 17, diff --git a/vault/external_tests/activity_testonly/activity_testonly_test.go b/vault/external_tests/activity_testonly/activity_testonly_test.go index 251be638a0f1..39edd66aeec2 100644 --- a/vault/external_tests/activity_testonly/activity_testonly_test.go +++ b/vault/external_tests/activity_testonly/activity_testonly_test.go @@ -6,9 +6,12 @@ package activity_testonly import ( + "bytes" "context" + "encoding/csv" "encoding/json" "fmt" + "io" "math" "testing" "time" @@ -451,6 +454,163 @@ func Test_ActivityLog_MountDeduplication(t *testing.T) { }, mountSet) } +// getJSONExport is used to fetch activity export records using json format. +// The records will returned as a map keyed by client ID. +func getJSONExport(t *testing.T, client *api.Client, monthsPreviousTo int, now time.Time) (map[string]vault.ActivityLogExportRecord, error) { + t.Helper() + + resp, err := client.Logical().ReadRawWithData("sys/internal/counters/activity/export", map[string][]string{ + "start_time": {timeutil.StartOfMonth(timeutil.MonthsPreviousTo(monthsPreviousTo, now)).Format(time.RFC3339)}, + "end_time": {timeutil.EndOfMonth(now).Format(time.RFC3339)}, + "format": {"json"}, + }) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + contents, err := io.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + buf := bytes.NewBuffer(contents) + decoder := json.NewDecoder(buf) + clients := make(map[string]vault.ActivityLogExportRecord) + + for { + + var record vault.ActivityLogExportRecord + err := decoder.Decode(&record) + if err != nil { + return nil, err + } + + clients[record.ClientID] = record + + if !decoder.More() { + break + } + } + + return clients, nil +} + +// getCSVExport is used to fetch activity export records using csv format. +// The records will returned as a map keyed by client ID. 
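+// The first row of the CSV output is treated as the header row; each subsequent row is decoded into an ActivityLogExportRecord via mapstructure.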
+func getCSVExport(t *testing.T, client *api.Client, monthsPreviousTo int, now time.Time) (map[string]vault.ActivityLogExportRecord, error) { + t.Helper() + + resp, err := client.Logical().ReadRawWithData("sys/internal/counters/activity/export", map[string][]string{ + "start_time": {timeutil.StartOfMonth(timeutil.MonthsPreviousTo(monthsPreviousTo, now)).Format(time.RFC3339)}, + "end_time": {timeutil.EndOfMonth(now).Format(time.RFC3339)}, + "format": {"csv"}, + }) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + csvRdr := csv.NewReader(resp.Body) + clients := make(map[string]vault.ActivityLogExportRecord) + + csvRecords, err := csvRdr.ReadAll() + if err != nil { + return nil, err + } + + csvColumns := csvRecords[0] + + for i := 1; i < len(csvRecords); i++ { + recordMap := make(map[string]interface{}) + + for j, k := range csvColumns { + recordMap[k] = csvRecords[i][j] + } + + var record vault.ActivityLogExportRecord + err = mapstructure.Decode(recordMap, &record) + if err != nil { + return nil, err + } + + clients[record.ClientID] = record + } + + return clients, nil +} + +// Test_ActivityLog_Export_Sudo ensures that the export API is only accessible via +// a root token or a token with a sudo policy. +func Test_ActivityLog_Export_Sudo(t *testing.T) { + timeutil.SkipAtEndOfMonth(t) + t.Parallel() + + now := time.Now().UTC() + var err error + + cluster := minimal.NewTestSoloCluster(t, nil) + client := cluster.Cores[0].Client + _, err = client.Logical().Write("sys/internal/counters/config", map[string]interface{}{ + "enabled": "enable", + }) + require.NoError(t, err) + + rootToken := client.Token() + + _, err = clientcountutil.NewActivityLogData(client). + NewCurrentMonthData(). + NewClientsSeen(10). + Write(context.Background(), generation.WriteOptions_WRITE_ENTITIES) + + require.NoError(t, err) + + // Ensure access via root token + clients, err := getJSONExport(t, client, 1, now) + require.NoError(t, err) + require.Len(t, clients, 10) + + client.Sys().PutPolicy("non-sudo-export", ` +path "sys/internal/counters/activity/export" { + capabilities = ["read"] +} + `) + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"non-sudo-export"}, + }) + require.NoError(t, err) + + nonSudoToken := secret.Auth.ClientToken + client.SetToken(nonSudoToken) + + // Ensure no access via token without sudo access + clients, err = getJSONExport(t, client, 1, now) + require.ErrorContains(t, err, "permission denied") + + client.SetToken(rootToken) + client.Sys().PutPolicy("sudo-export", ` +path "sys/internal/counters/activity/export" { + capabilities = ["read", "sudo"] +} + `) + + secret, err = client.Auth().Token().Create(&api.TokenCreateRequest{ + Policies: []string{"sudo-export"}, + }) + require.NoError(t, err) + + sudoToken := secret.Auth.ClientToken + client.SetToken(sudoToken) + + // Ensure access via token with sudo access + clients, err = getJSONExport(t, client, 1, now) + require.NoError(t, err) + require.Len(t, clients, 10) +} + // TestHandleQuery_MultipleMounts creates a cluster with // two userpass mounts. 
It then tests verifies that // the total new counts are calculated within a reasonably level of accuracy for diff --git a/vault/external_tests/plugin/external_plugin_test.go b/vault/external_tests/plugin/external_plugin_test.go index d2a73a51d3f2..9f8152190153 100644 --- a/vault/external_tests/plugin/external_plugin_test.go +++ b/vault/external_tests/plugin/external_plugin_test.go @@ -696,7 +696,7 @@ func TestExternalPlugin_Database(t *testing.T) { t.Run(dbName, func(t *testing.T) { roleName := "test-role-" + dbName - cleanupContainer, connURL := postgreshelper.PrepareTestContainerWithVaultUser(t, context.Background(), "13.4-buster") + cleanupContainer, connURL := postgreshelper.PrepareTestContainerWithVaultUser(t, context.Background()) t.Cleanup(cleanupContainer) _, err := client.Logical().Write("database/config/"+dbName, map[string]interface{}{ @@ -819,7 +819,7 @@ func TestExternalPlugin_DatabaseReload(t *testing.T) { dbName := fmt.Sprintf("%s-%d", plugin.Name, 0) roleName := "test-role-" + dbName - cleanupContainer, connURL := postgreshelper.PrepareTestContainerWithVaultUser(t, context.Background(), "13.4-buster") + cleanupContainer, connURL := postgreshelper.PrepareTestContainerWithVaultUser(t, context.Background()) t.Cleanup(cleanupContainer) _, err := client.Logical().Write("database/config/"+dbName, map[string]interface{}{ @@ -1204,7 +1204,7 @@ func TestCore_UpgradePluginUsingPinnedVersion_Database(t *testing.T) { t.Fatal(err) } - cleanupPG, connURL := postgreshelper.PrepareTestContainerWithVaultUser(t, context.Background(), "13.4-buster") + cleanupPG, connURL := postgreshelper.PrepareTestContainerWithVaultUser(t, context.Background()) t.Cleanup(cleanupPG) // Mount 1.0.0 then pin to 1.0.1 diff --git a/vault/identity_store.go b/vault/identity_store.go index 8d53f4c35682..22196269c8ff 100644 --- a/vault/identity_store.go +++ b/vault/identity_store.go @@ -721,34 +721,53 @@ func (i *IdentityStore) invalidateEntityBucket(ctx context.Context, key string) } } - // If the entity is not in MemDB or if it is but differs from the - // state that's in the bucket storage entry, upsert it into MemDB. - // We've considered the use of github.com/google/go-cmp here, // but opted for sticking with reflect.DeepEqual because go-cmp // is intended for testing and is able to panic in some // situations. - if memDBEntity == nil || !reflect.DeepEqual(memDBEntity, bucketEntity) { - // The entity is not in MemDB, it's a new entity. Add it to MemDB. - err = i.upsertEntityInTxn(ctx, txn, bucketEntity, nil, false) - if err != nil { - i.logger.Error("failed to update entity in MemDB", "entity_id", bucketEntity.ID, "error", err) + if memDBEntity != nil && reflect.DeepEqual(memDBEntity, bucketEntity) { + // No changes on this entity, move on to the next one. + continue + } + + // If the entity exists in MemDB it must differ from the entity in + // the storage bucket because of above test. Blindly delete the + // current aliases associated with the MemDB entity. The correct set + // of aliases will be created in MemDB by the upsertEntityInTxn + // function. We need to do this because the upsertEntityInTxn + // function does not delete those aliases, it only creates missing + // ones. 
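+ // For example, if the storage copy of the entity dropped an alias, the stale alias would otherwise linger in MemDB; clearing the aliases here lets the upsert below rebuild the entity's alias set from the bucket entry.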
+ if memDBEntity != nil { + if err := i.deleteAliasesInEntityInTxn(txn, memDBEntity, memDBEntity.Aliases); err != nil { + i.logger.Error("failed to remove entity aliases from changed entity", "entity_id", memDBEntity.ID, "error", err) return } - // If this is a performance secondary, the entity created on - // this node would have been cached in a local cache based on - // the result of the CreateEntity RPC call to the primary - // cluster. Since this invalidation is signaling that the - // entity is now in the primary cluster's storage, the locally - // cached entry can be removed. - if i.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && i.localNode.HAState() == consts.Active { - if err := i.localAliasPacker.DeleteItem(ctx, bucketEntity.ID+tmpSuffix); err != nil { - i.logger.Error("failed to clear local alias entity cache", "error", err, "entity_id", bucketEntity.ID) - return - } + if err := i.MemDBDeleteEntityByIDInTxn(txn, memDBEntity.ID); err != nil { + i.logger.Error("failed to delete changed entity", "entity_id", memDBEntity.ID, "error", err) + return } } + + err = i.upsertEntityInTxn(ctx, txn, bucketEntity, nil, false) + if err != nil { + i.logger.Error("failed to update entity in MemDB", "entity_id", bucketEntity.ID, "error", err) + return + } + + // If this is a performance secondary, the entity created on + // this node would have been cached in a local cache based on + // the result of the CreateEntity RPC call to the primary + // cluster. Since this invalidation is signaling that the + // entity is now in the primary cluster's storage, the locally + // cached entry can be removed. + if i.localNode.ReplicationState().HasState(consts.ReplicationPerformanceSecondary) && i.localNode.HAState() == consts.Active { + if err := i.localAliasPacker.DeleteItem(ctx, bucketEntity.ID+tmpSuffix); err != nil { + i.logger.Error("failed to clear local alias entity cache", "error", err, "entity_id", bucketEntity.ID) + return + } + } + } } diff --git a/vault/identity_store_test.go b/vault/identity_store_test.go index 7c826dfa0c33..da16ae6fac92 100644 --- a/vault/identity_store_test.go +++ b/vault/identity_store_test.go @@ -20,6 +20,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" "google.golang.org/protobuf/types/known/anypb" ) @@ -992,6 +993,278 @@ func TestIdentityStoreInvalidate_Entities(t *testing.T) { txn.Commit() } +// TestIdentityStoreInvalidate_EntityAliasDelete verifies that the +// invalidateEntityBucket method properly cleans up aliases from +// MemDB that are no longer associated with the entity in the +// storage bucket. 
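+// The test rewrites the entity's storage bucket without one of its aliases and expects the invalidation to remove only that alias from MemDB.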
+func TestIdentityStoreInvalidate_EntityAliasDelete(t *testing.T) { + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + c, _, root := TestCoreUnsealed(t) + + // Enable a No-Op auth method + c.credentialBackends["noop"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { + return &NoopBackend{ + BackendType: logical.TypeCredential, + }, nil + } + mountAccessor1 := "noop-accessor1" + mountAccessor2 := "noop-accessor2" + mountAccessor3 := "noon-accessor3" + + createMountEntry := func(path, uuid, mountAccessor string, local bool) *MountEntry { + return &MountEntry{ + Table: credentialTableType, + Path: path, + Type: "noop", + UUID: uuid, + Accessor: mountAccessor, + BackendAwareUUID: uuid + "backend", + NamespaceID: namespace.RootNamespaceID, + namespace: namespace.RootNamespace, + Local: local, + } + } + + c.auth = &MountTable{ + Type: credentialTableType, + Entries: []*MountEntry{ + createMountEntry("/noop1", "abcd", mountAccessor1, false), + createMountEntry("/noop2", "ghij", mountAccessor2, false), + createMountEntry("/noop3", "mnop", mountAccessor3, true), + }, + } + + require.NoError(t, c.setupCredentials(context.Background())) + + // Create an entity + req := &logical.Request{ + ClientToken: root, + Operation: logical.UpdateOperation, + Path: "entity", + Data: map[string]interface{}{ + "name": "alice", + }, + } + + resp, err := c.identityStore.HandleRequest(ctx, req) + require.NoError(t, err) + require.NotNil(t, resp) + require.Contains(t, resp.Data, "id") + + entityID := resp.Data["id"].(string) + + createEntityAlias := func(name, mountAccessor string) string { + req = &logical.Request{ + ClientToken: root, + Operation: logical.UpdateOperation, + Path: "entity-alias", + Data: map[string]interface{}{ + "name": name, + "canonical_id": entityID, + "mount_accessor": mountAccessor, + }, + } + + resp, err = c.identityStore.HandleRequest(ctx, req) + require.NoError(t, err) + require.NotNil(t, resp) + require.Contains(t, resp.Data, "id") + + return resp.Data["id"].(string) + } + + alias1ID := createEntityAlias("alias1", mountAccessor1) + alias2ID := createEntityAlias("alias2", mountAccessor2) + alias3ID := createEntityAlias("alias3", mountAccessor3) + + // Update the entity in storage only to remove alias2 then call invalidate + bucketKey := c.identityStore.entityPacker.BucketKey(entityID) + bucket, err := c.identityStore.entityPacker.GetBucket(context.Background(), bucketKey) + require.NoError(t, err) + require.NotNil(t, bucket) + + bucketEntityItem := bucket.Items[0] // since there's only 1 entity + bucketEntity, err := c.identityStore.parseEntityFromBucketItem(context.Background(), bucketEntityItem) + require.NoError(t, err) + require.NotNil(t, bucketEntity) + + replacementAliases := make([]*identity.Alias, 1) + for _, a := range bucketEntity.Aliases { + if a.ID != alias2ID { + replacementAliases[0] = a + break + } + } + + bucketEntity.Aliases = replacementAliases + + bucketEntityItem.Message, err = anypb.New(bucketEntity) + require.NoError(t, err) + + require.NoError(t, c.identityStore.entityPacker.PutItem(context.Background(), bucketEntityItem)) + + c.identityStore.Invalidate(context.Background(), bucketKey) + + alias1, err := c.identityStore.MemDBAliasByID(alias1ID, false, false) + assert.NoError(t, err) + assert.NotNil(t, alias1) + + alias2, err := c.identityStore.MemDBAliasByID(alias2ID, false, false) + assert.NoError(t, err) + assert.Nil(t, alias2) + + alias3, err := c.identityStore.MemDBAliasByID(alias3ID, false, false) + 
assert.NoError(t, err) + assert.NotNil(t, alias3) +} + +// TestIdentityStoreInvalidate_EntityLocalAliasDelete verifies that the +// invalidateLocalAliasesBucket method properly cleans up aliases from +// MemDB that are no longer associated with the entity in the +// storage bucket. +func TestIdentityStoreInvalidate_EntityLocalAliasDelete(t *testing.T) { + ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace) + c, _, root := TestCoreUnsealed(t) + + // Enable a No-Op auth method + c.credentialBackends["noop"] = func(context.Context, *logical.BackendConfig) (logical.Backend, error) { + return &NoopBackend{ + BackendType: logical.TypeCredential, + }, nil + } + mountAccessor1 := "noop-accessor1" + mountAccessor2 := "noop-accessor2" + mountAccessor3 := "noon-accessor3" + + createMountEntry := func(path, uuid, mountAccessor string, local bool) *MountEntry { + return &MountEntry{ + Table: credentialTableType, + Path: path, + Type: "noop", + UUID: uuid, + Accessor: mountAccessor, + BackendAwareUUID: uuid + "backend", + NamespaceID: namespace.RootNamespaceID, + namespace: namespace.RootNamespace, + Local: local, + } + } + + c.auth = &MountTable{ + Type: credentialTableType, + Entries: []*MountEntry{ + createMountEntry("/noop1", "abcd", mountAccessor1, true), + createMountEntry("/noop2", "ghij", mountAccessor2, true), + createMountEntry("/noop3", "mnop", mountAccessor3, true), + }, + } + + require.NoError(t, c.setupCredentials(context.Background())) + + // Create an entity + req := &logical.Request{ + ClientToken: root, + Operation: logical.UpdateOperation, + Path: "entity", + Data: map[string]interface{}{ + "name": "alice", + }, + } + + resp, err := c.identityStore.HandleRequest(ctx, req) + require.NoError(t, err) + require.NotNil(t, resp) + require.Contains(t, resp.Data, "id") + + entityID := resp.Data["id"].(string) + + createEntityAlias := func(name, mountAccessor string) string { + req = &logical.Request{ + ClientToken: root, + Operation: logical.UpdateOperation, + Path: "entity-alias", + Data: map[string]interface{}{ + "name": name, + "canonical_id": entityID, + "mount_accessor": mountAccessor, + }, + } + + resp, err = c.identityStore.HandleRequest(ctx, req) + require.NoError(t, err) + require.NotNil(t, resp) + require.Contains(t, resp.Data, "id") + + return resp.Data["id"].(string) + } + + alias1ID := createEntityAlias("alias1", mountAccessor1) + alias2ID := createEntityAlias("alias2", mountAccessor2) + alias3ID := createEntityAlias("alias3", mountAccessor3) + + for i, aliasID := range []string{alias1ID, alias2ID, alias3ID} { + alias, err := c.identityStore.MemDBAliasByID(aliasID, false, false) + require.NoError(t, err, i) + require.NotNil(t, alias, i) + } + + // // Update the entity in storage only to remove alias2 then call invalidate + bucketKey := c.identityStore.entityPacker.BucketKey(entityID) + bucket, err := c.identityStore.entityPacker.GetBucket(context.Background(), bucketKey) + require.NoError(t, err) + require.NotNil(t, bucket) + + bucketEntityItem := bucket.Items[0] // since there's only 1 entity + bucketEntity, err := c.identityStore.parseEntityFromBucketItem(context.Background(), bucketEntityItem) + require.NoError(t, err) + require.NotNil(t, bucketEntity) + + bucketKey = c.identityStore.localAliasPacker.BucketKey(entityID) + bucketLocalAlias, err := c.identityStore.localAliasPacker.GetBucket(context.Background(), bucketKey) + require.NoError(t, err) + require.NotNil(t, bucketLocalAlias) + + bucketLocalAliasItem := bucketLocalAlias.Items[0] + 
require.Equal(t, entityID, bucketLocalAliasItem.ID) + + var localAliases identity.LocalAliases + + err = anypb.UnmarshalTo(bucketLocalAliasItem.Message, &localAliases, proto.UnmarshalOptions{}) + require.NoError(t, err) + + memDBEntity, err := c.identityStore.MemDBEntityByID(entityID, false) + require.NoError(t, err) + require.NotNil(t, memDBEntity) + + replacementAliases := make([]*identity.Alias, 0) + for _, a := range memDBEntity.Aliases { + if a.ID != alias2ID { + replacementAliases = append(replacementAliases, a) + } + } + + localAliases.Aliases = replacementAliases + + bucketLocalAliasItem.Message, err = anypb.New(&localAliases) + require.NoError(t, err) + + require.NoError(t, c.identityStore.localAliasPacker.PutItem(context.Background(), bucketLocalAliasItem)) + + c.identityStore.Invalidate(context.Background(), bucketKey) + + alias1, err := c.identityStore.MemDBAliasByID(alias1ID, false, false) + assert.NoError(t, err) + assert.NotNil(t, alias1) + + alias2, err := c.identityStore.MemDBAliasByID(alias2ID, false, false) + assert.NoError(t, err) + assert.Nil(t, alias2) + + alias3, err := c.identityStore.MemDBAliasByID(alias3ID, false, false) + assert.NoError(t, err) + assert.NotNil(t, alias3) +} + // TestIdentityStoreInvalidate_LocalAliasesWithEntity verifies the correct // handling of local aliases in the Invalidate method. func TestIdentityStoreInvalidate_LocalAliasesWithEntity(t *testing.T) { diff --git a/vault/identity_store_util.go b/vault/identity_store_util.go index 4948f48c67cf..fe41b469ff6c 100644 --- a/vault/identity_store_util.go +++ b/vault/identity_store_util.go @@ -171,6 +171,19 @@ func (i *IdentityStore) loadGroups(ctx context.Context) error { } err = i.UpsertGroupInTxn(ctx, txn, group, persist) + + if errors.Is(err, logical.ErrReadOnly) { + // This is an imperfect solution to unblock customers who are running into + // a readonly error during a DR failover (jira #28191). More specifically, if there + // are duplicate aliases in storage then they are merged during loadEntities. Vault + // attempts to remove the deleted duplicate entities from their groups to clean up. + // If the node is a PR secondary though it will fail because the RPC client + // is not yet initialized and the storage is read-only. This prevents the cluster from + // unsealing entirely and can potentially block a DR failover from succeeding. + i.logger.Warn("received a read only error while trying to upsert group to storage") + err = nil + } + if err != nil { txn.Abort() return fmt.Errorf("failed to update group in memdb: %w", err) diff --git a/vault/logical_system.go b/vault/logical_system.go index 972a3b653ed5..57efed1e60a8 100644 --- a/vault/logical_system.go +++ b/vault/logical_system.go @@ -133,6 +133,7 @@ func NewSystemBackend(core *Core, logger log.Logger, config *logical.BackendConf "storage/raft/snapshot-auto/config/*", "leases", "internal/inspect/*", + "internal/counters/activity/export", // sys/seal and sys/step-down actually have their sudo requirement enforced through hardcoding // PolicyCheckOpts.RootPrivsRequired in dedicated calls to Core.performPolicyChecks, but we still need // to declare them here so that the generated OpenAPI spec gets their sudo status correct. 
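For illustration: because the export path above now requires sudo, a caller needs a token whose policy grants both read and sudo on it, as the Test_ActivityLog_Export_Sudo test above exercises. A minimal sketch, assuming an authenticated *api.Client that is allowed to write policies and create tokens; the exportActivity function and the activity-export policy name are placeholders, not part of this change:

package example

import (
	"io"
	"os"
	"time"

	"github.com/hashicorp/vault/api"
)

// exportActivity grants read+sudo on the export endpoint to a fresh token and
// streams the JSON-lines export for the last month to stdout.
func exportActivity(client *api.Client) error {
	// Grant read+sudo on the (now sudo-protected) export path.
	if err := client.Sys().PutPolicy("activity-export", `
path "sys/internal/counters/activity/export" {
  capabilities = ["read", "sudo"]
}`); err != nil {
		return err
	}

	secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{
		Policies: []string{"activity-export"},
	})
	if err != nil {
		return err
	}
	client.SetToken(secret.Auth.ClientToken)

	start := time.Now().UTC().AddDate(0, -1, 0).Format(time.RFC3339)
	end := time.Now().UTC().Format(time.RFC3339)

	resp, err := client.Logical().ReadRawWithData("sys/internal/counters/activity/export", map[string][]string{
		"start_time": {start},
		"end_time":   {end},
		"format":     {"json"},
	})
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	// Each line of the response body is one JSON-encoded ActivityLogExportRecord.
	_, err = io.Copy(os.Stdout, resp.Body)
	return err
}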
@@ -3913,7 +3914,7 @@ func (b *SystemBackend) handleEnableAudit(ctx context.Context, _ *logical.Reques // Attempt enabling if err := b.Core.enableAudit(ctx, me, true); err != nil { - b.Backend.Logger().Error("enable audit mount failed", "path", me.Path, "error", err) + b.Core.logger.Error("enable audit mount failed", "path", me.Path, "error", err) return handleError(audit.ConvertToExternalError(err)) } diff --git a/vault/logical_system_activity.go b/vault/logical_system_activity.go index 92feaf3ca59c..4c7b743cbf6e 100644 --- a/vault/logical_system_activity.go +++ b/vault/logical_system_activity.go @@ -5,6 +5,7 @@ package vault import ( "context" + "errors" "fmt" "net/http" "os" @@ -13,6 +14,7 @@ import ( "time" "github.com/hashicorp/go-secure-stdlib/parseutil" + "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/logical" ) @@ -94,6 +96,40 @@ func (b *SystemBackend) activityPaths() []*framework.Path { return []*framework.Path{ b.monthlyActivityCountPath(), b.activityQueryPath(), + { + Pattern: "internal/counters/activity/export$", + + DisplayAttrs: &framework.DisplayAttributes{ + OperationPrefix: "internal-client-activity", + OperationVerb: "export", + }, + + Fields: map[string]*framework.FieldSchema{ + "start_time": { + Type: framework.TypeTime, + Description: "Start of query interval", + }, + "end_time": { + Type: framework.TypeTime, + Description: "End of query interval", + }, + "format": { + Type: framework.TypeString, + Description: "Format of the file. Either a CSV or a JSON file with an object per line.", + Default: "json", + }, + }, + + HelpSynopsis: strings.TrimSpace(sysHelp["activity-export"][0]), + HelpDescription: strings.TrimSpace(sysHelp["activity-export"][1]), + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.handleClientExport, + Summary: "Returns a deduplicated export of all clients that had activity within the provided start and end times for this namespace and all child namespaces.", + }, + }, + }, } } @@ -240,15 +276,27 @@ func (b *SystemBackend) handleClientExport(ctx context.Context, req *logical.Req } } - runCtx, cancelFunc := context.WithTimeout(b.Core.activeContext, timeout) + ns, err := namespace.FromContext(ctx) + if err != nil { + return nil, err + } + + nsActiveContext := namespace.ContextWithNamespace(b.Core.activeContext, ns) + runCtx, cancelFunc := context.WithTimeout(nsActiveContext, timeout) defer cancelFunc() err = a.writeExport(runCtx, req.ResponseWriter, d.Get("format").(string), startTime, endTime) if err != nil { - return nil, err + if errors.Is(err, ErrActivityExportNoDataInRange) || errors.Is(err, ErrActivityExportInProgress) || strings.HasPrefix(err.Error(), ActivityExportInvalidFormatPrefix) { + return logical.ErrorResponse(err.Error()), nil + } else { + return nil, err + } } - return nil, nil + // default status to 204, this will get rewritten to 200 later if the export writes data to req.ResponseWriter + respNoContent, err := logical.RespondWithStatusCode(&logical.Response{}, req, http.StatusNoContent) + return respNoContent, err } func (b *SystemBackend) handleClientMetricQuery(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) { diff --git a/vault/logical_system_activity_write_testonly.go b/vault/logical_system_activity_write_testonly.go index 40671595e738..d5316f2d1ea7 100644 --- a/vault/logical_system_activity_write_testonly.go +++ 
b/vault/logical_system_activity_write_testonly.go @@ -47,6 +47,8 @@ func (b *SystemBackend) activityWritePath() *framework.Path { } func (b *SystemBackend) handleActivityWriteData(ctx context.Context, request *logical.Request, data *framework.FieldData) (*logical.Response, error) { + now := time.Now().UTC() + json := data.Get("input") input := &generation.ActivityLogMockInput{} err := protojson.Unmarshal([]byte(json.(string)), input) @@ -73,7 +75,7 @@ func (b *SystemBackend) handleActivityWriteData(ctx context.Context, request *lo } generated := newMultipleMonthsActivityClients(numMonths + 1) for _, month := range input.Data { - err := generated.processMonth(ctx, b.Core, month) + err := generated.processMonth(ctx, b.Core, month, now) if err != nil { return logical.ErrorResponse("failed to process data for month %d", month.GetMonthsAgo()), err } @@ -83,7 +85,7 @@ func (b *SystemBackend) handleActivityWriteData(ctx context.Context, request *lo for _, opt := range input.Write { opts[opt] = struct{}{} } - paths, err := generated.write(ctx, opts, b.Core.activityLog) + paths, err := generated.write(ctx, opts, b.Core.activityLog, now) if err != nil { b.logger.Debug("failed to write activity log data", "error", err.Error()) return logical.ErrorResponse("failed to write data"), err @@ -185,12 +187,14 @@ func (s *singleMonthActivityClients) populateSegments() (map[int][]*activity.Ent // addNewClients generates clients according to the given parameters, and adds them to the month // the client will always have the mountAccessor as its mount accessor -func (s *singleMonthActivityClients) addNewClients(c *generation.Client, mountAccessor string, segmentIndex *int) error { +func (s *singleMonthActivityClients) addNewClients(c *generation.Client, mountAccessor string, segmentIndex *int, monthsAgo int32, now time.Time) error { count := 1 if c.Count > 1 { count = int(c.Count) } isNonEntity := c.ClientType != entityActivityType + ts := timeutil.MonthsPreviousTo(int(monthsAgo), now) + for i := 0; i < count; i++ { record := &activity.EntityRecord{ ClientID: c.Id, @@ -198,6 +202,7 @@ func (s *singleMonthActivityClients) addNewClients(c *generation.Client, mountAc MountAccessor: mountAccessor, NonEntity: isNonEntity, ClientType: c.ClientType, + Timestamp: ts.Unix(), } if record.ClientID == "" { var err error @@ -212,7 +217,7 @@ func (s *singleMonthActivityClients) addNewClients(c *generation.Client, mountAc } // processMonth populates a month of client data -func (m *multipleMonthsActivityClients) processMonth(ctx context.Context, core *Core, month *generation.Data) error { +func (m *multipleMonthsActivityClients) processMonth(ctx context.Context, core *Core, month *generation.Data, now time.Time) error { // default to using the root namespace and the first mount on the root namespace mounts, err := core.ListMounts() if err != nil { @@ -275,7 +280,7 @@ func (m *multipleMonthsActivityClients) processMonth(ctx context.Context, core * } } - err = m.addClientToMonth(month.GetMonthsAgo(), clients, mountAccessor, segmentIndex) + err = m.addClientToMonth(month.GetMonthsAgo(), clients, mountAccessor, segmentIndex, now) if err != nil { return err } @@ -301,11 +306,11 @@ func (m *multipleMonthsActivityClients) processMonth(ctx context.Context, core * return nil } -func (m *multipleMonthsActivityClients) addClientToMonth(monthsAgo int32, c *generation.Client, mountAccessor string, segmentIndex *int) error { +func (m *multipleMonthsActivityClients) addClientToMonth(monthsAgo int32, c *generation.Client, mountAccessor 
string, segmentIndex *int, now time.Time) error { if c.Repeated || c.RepeatedFromMonth > 0 { return m.addRepeatedClients(monthsAgo, c, mountAccessor, segmentIndex) } - return m.months[monthsAgo].addNewClients(c, mountAccessor, segmentIndex) + return m.months[monthsAgo].addNewClients(c, mountAccessor, segmentIndex, monthsAgo, now) } func (m *multipleMonthsActivityClients) addRepeatedClients(monthsAgo int32, c *generation.Client, mountAccessor string, segmentIndex *int) error { @@ -351,8 +356,7 @@ func (m *multipleMonthsActivityClients) timestampForMonth(i int, now time.Time) return now } -func (m *multipleMonthsActivityClients) write(ctx context.Context, opts map[generation.WriteOptions]struct{}, activityLog *ActivityLog) ([]string, error) { - now := time.Now().UTC() +func (m *multipleMonthsActivityClients) write(ctx context.Context, opts map[generation.WriteOptions]struct{}, activityLog *ActivityLog, now time.Time) ([]string, error) { paths := []string{} _, writePQ := opts[generation.WriteOptions_WRITE_PRECOMPUTED_QUERIES] diff --git a/vault/logical_system_activity_write_testonly_test.go b/vault/logical_system_activity_write_testonly_test.go index e7291ed0bddc..5254c0aaed6b 100644 --- a/vault/logical_system_activity_write_testonly_test.go +++ b/vault/logical_system_activity_write_testonly_test.go @@ -170,7 +170,7 @@ func Test_singleMonthActivityClients_addNewClients(t *testing.T) { m := &singleMonthActivityClients{ predefinedSegments: make(map[int][]int), } - err := m.addNewClients(tt.clients, tt.mount, tt.segmentIndex) + err := m.addNewClients(tt.clients, tt.mount, tt.segmentIndex, 0, time.Now().UTC()) require.NoError(t, err) numNew := tt.clients.Count if numNew == 0 { @@ -275,7 +275,7 @@ func Test_multipleMonthsActivityClients_processMonth(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { m := newMultipleMonthsActivityClients(tt.numMonths) - err := m.processMonth(context.Background(), core, tt.clients) + err := m.processMonth(context.Background(), core, tt.clients, time.Now().UTC()) if tt.wantError { require.Error(t, err) } else { @@ -320,7 +320,7 @@ func Test_multipleMonthsActivityClients_processMonth_segmented(t *testing.T) { } m := newMultipleMonthsActivityClients(1) core, _, _ := TestCoreUnsealed(t) - require.NoError(t, m.processMonth(context.Background(), core, data)) + require.NoError(t, m.processMonth(context.Background(), core, data, time.Now().UTC())) require.Len(t, m.months[0].predefinedSegments, 3) require.Len(t, m.months[0].clients, 3) @@ -339,13 +339,15 @@ func Test_multipleMonthsActivityClients_processMonth_segmented(t *testing.T) { // from 1 month ago and 2 months ago, and verifies that the correct clients are // added based on namespace, mount, and non-entity attributes func Test_multipleMonthsActivityClients_addRepeatedClients(t *testing.T) { + now := time.Now().UTC() + m := newMultipleMonthsActivityClients(3) defaultMount := "default" - require.NoError(t, m.addClientToMonth(2, &generation.Client{Count: 2}, "identity", nil)) - require.NoError(t, m.addClientToMonth(2, &generation.Client{Count: 2, Namespace: "other_ns"}, defaultMount, nil)) - require.NoError(t, m.addClientToMonth(1, &generation.Client{Count: 2}, defaultMount, nil)) - require.NoError(t, m.addClientToMonth(1, &generation.Client{Count: 2, ClientType: "non-entity"}, defaultMount, nil)) + require.NoError(t, m.addClientToMonth(2, &generation.Client{Count: 2}, "identity", nil, now)) + require.NoError(t, m.addClientToMonth(2, &generation.Client{Count: 2, Namespace: "other_ns"}, 
defaultMount, nil, now)) + require.NoError(t, m.addClientToMonth(1, &generation.Client{Count: 2}, defaultMount, nil, now)) + require.NoError(t, m.addClientToMonth(1, &generation.Client{Count: 2, ClientType: "non-entity"}, defaultMount, nil, now)) month2Clients := m.months[2].clients month1Clients := m.months[1].clients diff --git a/vault/testing.go b/vault/testing.go index a3e57b80ecc9..e007ec008c3e 100644 --- a/vault/testing.go +++ b/vault/testing.go @@ -31,6 +31,7 @@ import ( "time" "github.com/armon/go-metrics" + "github.com/golang/protobuf/ptypes" "github.com/hashicorp/go-cleanhttp" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-secure-stdlib/reloadutil" @@ -39,8 +40,10 @@ import ( "github.com/hashicorp/vault/audit" "github.com/hashicorp/vault/command/server" "github.com/hashicorp/vault/helper/constants" + "github.com/hashicorp/vault/helper/identity" "github.com/hashicorp/vault/helper/metricsutil" "github.com/hashicorp/vault/helper/namespace" + "github.com/hashicorp/vault/helper/storagepacker" "github.com/hashicorp/vault/helper/testhelpers/corehelpers" "github.com/hashicorp/vault/helper/testhelpers/pluginhelpers" "github.com/hashicorp/vault/internalshared/configutil" @@ -57,6 +60,7 @@ import ( "github.com/mitchellh/copystructure" "golang.org/x/crypto/ed25519" "golang.org/x/net/http2" + "google.golang.org/protobuf/types/known/anypb" ) // This file contains a number of methods that are useful for unit @@ -2193,3 +2197,85 @@ var ( _ testcluster.VaultCluster = &TestCluster{} _ testcluster.VaultClusterNode = &TestClusterCore{} ) + +// TestCreateDuplicateEntityAliasesInStorage creates n entities with a duplicate alias in storage +// This should only be used in testing +func TestCreateDuplicateEntityAliasesInStorage(ctx context.Context, c *Core, n int) ([]string, error) { + userpassMe := &MountEntry{ + Table: credentialTableType, + Path: "userpass/", + Type: "userpass", + Description: "userpass", + Accessor: "userpass1", + } + err := c.enableCredential(namespace.RootContext(nil), userpassMe) + if err != nil { + return nil, err + } + + var entityIDs []string + for i := 0; i < n; i++ { + entityID := fmt.Sprintf("e%d", i) + entityIDs = append(entityIDs, entityID) + a := &identity.Alias{ + ID: entityID, + CanonicalID: entityID, + MountType: "userpass", + MountAccessor: userpassMe.Accessor, + Name: "alias-dup", + } + e := &identity.Entity{ + ID: entityID, + Name: "entity-dup", + Aliases: []*identity.Alias{ + a, + }, + NamespaceID: namespace.RootNamespaceID, + BucketKey: c.identityStore.entityPacker.BucketKey(entityID), + } + + entity, err := ptypes.MarshalAny(e) + if err != nil { + return nil, err + } + item := &storagepacker.Item{ + ID: e.ID, + Message: entity, + } + if err = c.identityStore.entityPacker.PutItem(ctx, item); err != nil { + return nil, err + } + } + + return entityIDs, nil +} + +// TestCreateStorageGroup creates a group in storage only to bypass checks that the entities exist in memdb +// Should only be used in testing +func TestCreateStorageGroup(ctx context.Context, c *Core, entityIDs []string) error { + // generate random int + i := mathrand.Intn(100) + + key := fmt.Sprintf("testgroupid-%d", i) + + group := &identity.Group{ + ID: key, + Name: "testgroupname", + Policies: []string{"testgrouppolicy"}, + MemberEntityIDs: entityIDs, + BucketKey: c.identityStore.groupPacker.BucketKey(key), + } + groupAsAny, err := anypb.New(group) + if err != nil { + return err + } + item := &storagepacker.Item{ + ID: group.ID, + Message: groupAsAny, + } + err = 
c.identityStore.groupPacker.PutItem(ctx, item) + if err != nil { + return err + } + return nil +} diff --git a/website/content/api-docs/index.mdx b/website/content/api-docs/index.mdx index 26b532ca6d43..a5a4166387df 100644 --- a/website/content/api-docs/index.mdx +++ b/website/content/api-docs/index.mdx @@ -54,6 +54,8 @@ in periods. Otherwise, Vault will return a 404 unsupported path error. ## Namespaces +@include 'alerts/enterprise-and-hcp.mdx' + When using [Namespaces](/vault/docs/enterprise/namespaces) the final path of the API request is relative to the `X-Vault-Namespace` header. For instance, if a request URI is `secret/foo` with the `X-Vault-Namespace` header set as `ns1/ns2/`, @@ -83,6 +85,19 @@ $ curl \ http://127.0.0.1:8200/v1/ns1/ns2/secret/foo ``` + + +When you are working with HCP Vault Dedicated, your request must specify the +target namespace. In the absence of an explicit namespace, Vault tries to send +the request to the `root` namespace, which results in an error. + +The top-level namespace for HCP Vault Dedicated clusters is `admin`, so requests +must include the `-H "X-Vault-Namespace: admin"` header or `admin` in the +API endpoint path. + + + + ## API operations Typically the request data, body and response data to and from Vault is in JSON. diff --git a/website/content/api-docs/secret/databases/mssql.mdx b/website/content/api-docs/secret/databases/mssql.mdx index 888968cfc986..3642b1dafa96 100644 --- a/website/content/api-docs/secret/databases/mssql.mdx +++ b/website/content/api-docs/secret/databases/mssql.mdx @@ -127,3 +127,10 @@ list the plugin does not support that statement type. base64-encoded semicolon-separated string, a serialized JSON string array, or a base64-encoded serialized JSON string array. The `{{name}}` value will be substituted. If not provided defaults to a generic drop user statement. + +- `rotation_statements` `(list: [])` – Specifies the database statements to be + executed to rotate the password for a given username. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The `{{name}}` and `{{password}}` values will be substituted. The + generated password will be a random alphanumeric 20-character string. diff --git a/website/content/api-docs/secret/databases/oracle.mdx b/website/content/api-docs/secret/databases/oracle.mdx index 69539e5f494f..4cc4cc174851 100644 --- a/website/content/api-docs/secret/databases/oracle.mdx +++ b/website/content/api-docs/secret/databases/oracle.mdx @@ -130,3 +130,10 @@ list the plugin does not support that statement type. base64-encoded semicolon-separated string, a serialized JSON string array, or a base64-encoded serialized JSON string array. The `{{name}}` value will be substituted. If not provided defaults to a generic drop user statement. + +- `rotation_statements` `(list: [])` – Specifies the database statements to be + executed to rotate the password for a given username. Must be a + semicolon-separated string, a base64-encoded semicolon-separated string, a + serialized JSON string array, or a base64-encoded serialized JSON string + array. The `{{name}}` and `{{password}}` values will be substituted. The + generated password will be a random alphanumeric 20-character string.
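As a quick illustration of the `rotation_statements` parameter documented above, the following is a minimal sketch of supplying it when creating a static role against an MSSQL connection. The mount path `database/`, the connection name `my-mssql-db`, the role name `my-static-role`, and the login `vault-user` are hypothetical, and the `ALTER LOGIN` statement is only one example of a rotation statement that uses the `{{name}}` and `{{password}}` placeholders.

```shell-session
# Hypothetical example: attach rotation statements to a static role.
# Mount path, connection name, role name, and username are placeholders.
$ vault write database/static-roles/my-static-role \
    db_name=my-mssql-db \
    username=vault-user \
    rotation_period=86400 \
    rotation_statements="ALTER LOGIN [{{name}}] WITH PASSWORD = '{{password}}';"
```

If the statements are omitted, the plugin falls back to its built-in rotation behavior, so the parameter is only needed when the default statement is not appropriate for the target database.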
diff --git a/website/content/api-docs/system/internal-counters.mdx b/website/content/api-docs/system/internal-counters.mdx index 6e12f44c6f1a..27cc7a5767d3 100644 --- a/website/content/api-docs/system/internal-counters.mdx +++ b/website/content/api-docs/system/internal-counters.mdx @@ -239,14 +239,6 @@ Vault lead to the new clients for each month. } ``` -- The `distinct_entities` field name has been deprecated since Vault 1.10. Refer to -`entity_clients` field instead. The `distinct_entities` field is currently -returned by the API for backward compatibility and it may be removed in the future. - -- The `non_entity_tokens` field name has been deprecated since Vault 1.10. Refer to -`non_entity_clients` field instead. The `non_entity_tokens` field is currently -returned by the API for backward compatibility, and it may be removed in the future. - - If the `end_date` supplied to the API is for the current month, the activity information returned by this API will only be till the previous month. The activity system is designed to process the accumulated activity only at the end @@ -269,9 +261,7 @@ That is to say, the response will appear as follows. { "timestamp":"current_month_timestamp", "counts":{ - "distinct_entities":"exact int value", "entity_clients":"exact int value", - "non_entity_tokens":"exact int value", "non_entity_clients":"exact int value", "secret_syncs":"exact int value", "acme_clients":"exact int value", @@ -282,9 +272,7 @@ That is to say, the response will appear as follows. "namespace_id":"root", "namespace_path":"path", "counts":{ - "distinct_entities":"exact int value", "entity_clients":"exact int value", - "non_entity_tokens":"exact int value", "non_entity_clients":"exact int value", "secret_syncs":"exact int value", "acme_clients":"exact int value", @@ -294,9 +282,7 @@ That is to say, the response will appear as follows. { "path":"auth/up2/", "counts":{ - "distinct_entities":"exact int value", "entity_clients":"exact int value", - "non_entity_tokens":"exact int value", "non_entity_clients":"exact int value", "secret_syncs":"exact int value", "acme_clients":"exact int value", @@ -308,9 +294,7 @@ That is to say, the response will appear as follows. 
], "new_clients":{ "counts":{ - "distinct_entities":"approx int value", "entity_clients":"approx int value", - "non_entity_tokens":"approx int value", "non_entity_clients":"approx int value", "secret_syncs":"approx int value", "acme_clients":"approx int value", @@ -381,9 +365,7 @@ $ curl \ "namespace_id":"root", "namespace_path":"", "counts":{ - "distinct_entities":20, "entity_clients":20, - "non_entity_tokens":10, "non_entity_clients":10, "secret_syncs": 5, "acme_clients": 3, @@ -393,9 +375,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":10, "entity_clients":10, - "non_entity_tokens":10, "non_entity_clients":10, "secret_syncs": 0, "acme_clients": 0, @@ -405,9 +385,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs": 0, "acme_clients": 0, @@ -417,9 +395,7 @@ $ curl \ { "path":"secrets/kv1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":3, "acme_clients": 0, @@ -429,9 +405,7 @@ $ curl \ { "path":"secrets/kv2/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients": 0, @@ -441,9 +415,7 @@ $ curl \ { "path":"secrets/pki/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients": 3, @@ -456,9 +428,7 @@ $ curl \ "namespace_id":"s07UR", "namespace_path":"ns1/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -468,9 +438,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -480,9 +448,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":0, @@ -497,9 +463,7 @@ $ curl \ { "timestamp":"2021-05-01T00:00:00Z", "counts":{ - "distinct_entities":20, "entity_clients":20, - "non_entity_tokens":10, "non_entity_clients":10, "secret_syncs":5, "acme_clients":3, @@ -510,9 +474,7 @@ $ curl \ "namespace_id":"root", "namespace_path":"", "counts":{ - "distinct_entities":15, "entity_clients":15, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":5, "acme_clients":3, @@ -522,9 +484,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":10, "entity_clients":10, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -534,9 +494,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":3, "entity_clients":3, - "non_entity_tokens":2, "non_entity_clients":2, "secret_syncs":0, "acme_clients":0, @@ -546,9 +504,7 @@ $ curl \ { "path":"secrets/kv1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":3, "acme_clients":0, @@ -558,9 +514,7 @@ $ curl \ { "path":"secrets/kv2/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":2, "acme_clients":0, @@ -570,9 +524,7 @@ $ curl \ { "path":"secrets/pki/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":3, @@ -585,9 +537,7 @@ $ curl \ "namespace_id":"s07UR", "namespace_path":"ns1/", "counts":{ - 
"distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -597,9 +547,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -609,9 +557,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":0, @@ -623,9 +569,7 @@ $ curl \ ], "new_clients":{ "counts":{ - "distinct_entities":10, "entity_clients":10, - "non_entity_tokens":10, "non_entity_clients":10, "secret_syncs":2, "acme_clients":1, @@ -636,9 +580,7 @@ $ curl \ "namespace_id":"root", "namespace_path":"", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":2, "acme_clients":1, @@ -648,9 +590,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":5, "non_entity_clients":5, "clients":5 } @@ -658,9 +598,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":0, "non_entity_clients":0, "clients":5 } @@ -668,9 +606,7 @@ $ curl \ { "path":"secrets/kv1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":1, "clients":1 @@ -679,9 +615,7 @@ $ curl \ { "path":"secrets/kv2/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":1, "clients":1 @@ -690,9 +624,7 @@ $ curl \ { "path":"secrets/pki/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":1, @@ -705,9 +637,7 @@ $ curl \ "namespace_id":"s07UR", "namespace_path":"ns1/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -717,9 +647,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -729,9 +657,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":0, @@ -746,9 +672,7 @@ $ curl \ { "timestamp":"2021-04-01T00:00:00Z", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":3, "acme_clients":1, @@ -759,9 +683,7 @@ $ curl \ "namespace_id":"root", "namespace_path":"", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -771,9 +693,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -783,9 +703,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":0, @@ -797,9 +715,7 @@ $ curl \ ], "new_clients":{ "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":3, "acme_clients":1, @@ -810,9 +726,7 @@ $ curl \ "namespace_id":"root", "namespace_path":"", "counts":{ - "distinct_entities":5, 
"entity_clients":5, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":3, "acme_clients":1, @@ -822,9 +736,7 @@ $ curl \ { "path":"auth/up1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":5, "non_entity_clients":5, "secret_syncs":0, "acme_clients":0, @@ -834,9 +746,7 @@ $ curl \ { "path":"auth/up2/", "counts":{ - "distinct_entities":5, "entity_clients":5, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":0, @@ -846,9 +756,7 @@ $ curl \ { "path":"secrets/kv1/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":2, "acme_clients":0, @@ -858,9 +766,7 @@ $ curl \ { "path":"secrets/kv2/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":1, "acme_clients":0, @@ -870,9 +776,7 @@ $ curl \ { "path":"secrets/pki/", "counts":{ - "distinct_entities":0, "entity_clients":0, - "non_entity_tokens":0, "non_entity_clients":0, "secret_syncs":0, "acme_clients":1, @@ -887,9 +791,7 @@ $ curl \ ], "start_time":"2021-01-01T00:00:00Z", "total":{ - "distinct_entities":20, "entity_clients":20, - "non_entity_tokens":20, "non_entity_clients":20, "secret_syncs":5, "acme_clients":3, @@ -965,10 +867,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 1, "secret_syncs": 0 }, "mounts": [ @@ -976,10 +876,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "mount_path": "auth_token_0747d59c" @@ -990,17 +888,14 @@ $ curl \ } ], "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "months": [ { "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "namespaces": [ @@ -1008,10 +903,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "mounts": [ @@ -1019,10 +912,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "mount_path": "auth_token_0747d59c" @@ -1036,10 +927,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "namespaces": [ @@ -1047,10 +936,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "mounts": [ @@ -1058,10 +945,8 @@ $ curl \ "counts": { "acme_clients": 0, "clients": 1, - "distinct_entities": 0, "entity_clients": 0, "non_entity_clients": 1, - "non_entity_tokens": 0, "secret_syncs": 0 }, "mount_path": "auth_token_0747d59c" @@ -1076,7 +961,6 @@ $ curl \ } ], "non_entity_clients": 1, - "non_entity_tokens": 1, "secret_syncs": 0 }, "warnings": null diff --git a/website/content/docs/agent-and-proxy/proxy/caching/static-secret-caching.mdx b/website/content/docs/agent-and-proxy/proxy/caching/static-secret-caching.mdx index ed4385824cbd..3f7b0417facd 100644 --- a/website/content/docs/agent-and-proxy/proxy/caching/static-secret-caching.mdx +++ 
b/website/content/docs/agent-and-proxy/proxy/caching/static-secret-caching.mdx @@ -146,7 +146,7 @@ the cache or forwarded from Vault. In the event of a hit, Proxy also sets the The top level `cache` block has the following configuration entries relating to static secret caching: - `cache_static_secrets` `(bool: false)` - Enables static secret caching when -set to `true`. When `cache_static_secrets` and `auth_auth` are both enabled, +set to `true`. When `cache_static_secrets` and `auto_auth` are both enabled, Vault Proxy serves KV secrets directly from the cache to clients with sufficient permission. diff --git a/website/content/docs/commands/token/capabilities.mdx b/website/content/docs/commands/token/capabilities.mdx index dc26fdbad2d5..265f4707ab1b 100644 --- a/website/content/docs/commands/token/capabilities.mdx +++ b/website/content/docs/commands/token/capabilities.mdx @@ -11,27 +11,33 @@ description: |- The `token capabilities` command fetches the capabilities of a token for a given path. -If a TOKEN is provided as an argument, this command uses the "/sys/capabilities" -endpoint and permission. If no TOKEN is provided, this command uses the -"/sys/capabilities-self" endpoint and permission with the locally authenticated -token. +If you pass a token value as an argument, this command uses the +`/sys/capabilities` endpoint and permission. In the absence of an explicit token +value, this command uses the `/sys/capabilities-self` endpoint and permission +with the locally authenticated token. ## Examples -List capabilities for the local token on the "secret/foo" path: +List capabilities for the local token on the `secret/foo` path: ```shell-session $ vault token capabilities secret/foo read ``` -List capabilities for a token on the "cubbyhole/foo" path: +The output shows the local token has read permission on the `secret/foo` path. + +List capabilities for a token (`hvs.CAESI...WtiSW5mWUY`) on the `database/creds/readonly` +path: ```shell-session -$ vault token capabilities 96ddf4bc-d217-f3ba-f9bd-017055595017 database/creds/readonly +$ vault token capabilities hvs.CAESI...WtiSW5mWUY database/creds/readonly deny ``` +The output shows the token (`hvs.CAESI...WtiSW5mWUY`) has no permission to +operate on the `database/creds/readonly` path. + ## Usage The following flags are available in addition to the [standard set of diff --git a/website/content/docs/concepts/policies.mdx b/website/content/docs/concepts/policies.mdx index f01b0bf8b8bf..6c9bd3d1143c 100644 --- a/website/content/docs/concepts/policies.mdx +++ b/website/content/docs/concepts/policies.mdx @@ -193,6 +193,11 @@ wildcard appears in the same place, both end in `*` and the latter has two wildc segments while the former has zero. So we end at rule (3), and give `"secret/+/+/foo/*"` _lower_ priority. +Another example uses Vault [namespaces](/vault/docs/enterprise/namespaces). Given the [nested](/vault/tutorials/enterprise/namespace-structure) namespaces `ns1/ns2/ns3` and two paths, +`"secret/*"` and `"ns1/ns2/ns3/secret/apps/*"`, where `secret` is a mount point in namespace `ns3`: the first path is +defined in a policy inside (relative to) namespace `ns3`, while the second path is defined in a policy in the `root` namespace. +Both paths end in `*`, but the first is shorter. So we end at rule (4), and give `"secret/*"` _lower_ priority. + !> **Informational:** The glob character referred to in this documentation is the asterisk (`*`). It _is not a regular expression_ and is only supported **as the last character of the path**!
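To make the nested-namespace priority example above concrete, here is a minimal sketch of the two competing policies. The policy names (`ns3-secret` and `root-app-secret`) and the chosen capabilities are hypothetical; the only point is that one path is written relative to `ns1/ns2/ns3` while the other is written from the `root` namespace.

```shell-session
# Hypothetical policy written inside (relative to) namespace ns1/ns2/ns3.
$ vault policy write -namespace=ns1/ns2/ns3 ns3-secret - <<EOF
path "secret/*" {
  capabilities = ["read", "list"]
}
EOF

# Hypothetical policy written from the root namespace.
$ vault policy write root-app-secret - <<EOF
path "ns1/ns2/ns3/secret/apps/*" {
  capabilities = ["read", "list"]
}
EOF
```

When a request in `ns1/ns2/ns3` matches both path expressions, the priority rules described above decide which rule's capabilities apply.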
diff --git a/website/content/docs/configuration/service-registration/kubernetes.mdx b/website/content/docs/configuration/service-registration/kubernetes.mdx index 2e7af4a47079..14728f9a476e 100644 --- a/website/content/docs/configuration/service-registration/kubernetes.mdx +++ b/website/content/docs/configuration/service-registration/kubernetes.mdx @@ -71,7 +71,7 @@ metadata: vault-initialized: "true" vault-perf-standby: "false" vault-sealed: "false" - vault-version: 1.16.1 + vault-version: 1.17.2 ``` After shutdowns, Vault pods will bear the following labels: @@ -86,7 +86,7 @@ metadata: vault-initialized: "false" vault-perf-standby: "false" vault-sealed: "true" - vault-version: 1.16.1 + vault-version: 1.17.2 ``` ## Label definitions @@ -102,7 +102,7 @@ metadata: - `vault-sealed` `(string: "true"/"false")` – Vault sealed is updated dynamically each time Vault's sealed/unsealed status changes. True indicates that Vault is currently sealed. False indicates that Vault is currently unsealed. -- `vault-version` `(string: "1.16.1")` – Vault version is a string that will not change during a pod's lifecycle. +- `vault-version` `(string: "1.17.2")` – Vault version is a string that will not change during a pod's lifecycle. ## Working with vault's service discovery labels @@ -118,7 +118,7 @@ metadata: labels: app.kubernetes.io/instance: vault app.kubernetes.io/name: vault - helm.sh/chart: vault-0.1.2 + helm.sh/chart: vault-0.28.1 name: vault-active-us-east namespace: default spec: @@ -156,7 +156,7 @@ $ vault write -f sys/replication/performance/primary/enable \ In conjunction with the pod labels and the `OnDelete` upgrade strategy, upgrades are much easier to orchestrate: ```shell-session -$ helm upgrade vault --set='server.image.tag=1.16.1' +$ helm upgrade vault --set='server.image.tag=1.17.2' $ kubectl delete pod --selector=vault-active=false \ --selector=vault-version=1.2.3 diff --git a/website/content/docs/platform/k8s/helm/configuration.mdx b/website/content/docs/platform/k8s/helm/configuration.mdx index ecf71217da27..beeb08a763fb 100644 --- a/website/content/docs/platform/k8s/helm/configuration.mdx +++ b/website/content/docs/platform/k8s/helm/configuration.mdx @@ -79,7 +79,7 @@ and consider if they're appropriate for your deployment. - `repository` (`string: "hashicorp/vault-k8s"`) - The name of the Docker image for Vault Agent Injector. - - `tag` (`string: "1.4.1"`) - The tag of the Docker image for the Vault Agent Injector. **This should be pinned to a specific version when running in production.** Otherwise, other changes to the chart may inadvertently upgrade your admission controller. + - `tag` (`string: "1.4.2"`) - The tag of the Docker image for the Vault Agent Injector. **This should be pinned to a specific version when running in production.** Otherwise, other changes to the chart may inadvertently upgrade your admission controller. - `pullPolicy` (`string: "IfNotPresent"`) - The pull policy for container images. The default pull policy is `IfNotPresent` which causes the Kubelet to skip pulling an image if it already exists. @@ -87,7 +87,7 @@ and consider if they're appropriate for your deployment. - `repository` (`string: "hashicorp/vault"`) - The name of the Docker image for the Vault Agent sidecar. This should be set to the official Vault Docker image. - - `tag` (`string: "1.16.1"`) - The tag of the Vault Docker image to use for the Vault Agent Sidecar. **Vault 1.3.1+ is required by the admission controller**. 
+ - `tag` (`string: "1.17.2"`) - The tag of the Vault Docker image to use for the Vault Agent Sidecar. **Vault 1.3.1+ is required by the admission controller**. - `agentDefaults` - Values that configure the injected Vault Agent containers default values. @@ -351,7 +351,7 @@ and consider if they're appropriate for your deployment. - `repository` (`string: "hashicorp/vault"`) - The name of the Docker image for the containers running Vault. - - `tag` (`string: "1.16.1"`) - The tag of the Docker image for the containers running Vault. **This should be pinned to a specific version when running in production.** Otherwise, other changes to the chart may inadvertently upgrade your admission controller. + - `tag` (`string: "1.17.2"`) - The tag of the Docker image for the containers running Vault. **This should be pinned to a specific version when running in production.** Otherwise, other changes to the chart may inadvertently upgrade your admission controller. - `pullPolicy` (`string: "IfNotPresent"`) - The pull policy for container images. The default pull policy is `IfNotPresent` which causes the Kubelet to skip pulling an image if it already exists. @@ -724,7 +724,7 @@ and consider if they're appropriate for your deployment. "sample/annotation2": "bar" ``` - - `configAnnotation` (`boolean: false`) - Add an annotation to the server configmap and the statefulset pods, `vaultproject.io/config-checksum`, that is a hash of the Vault configuration. This can be used together with an OnDelete deployment strategy to help identify which pods still need to be deleted during a deployment to pick up any configuration changes. + - `includeConfigAnnotation` (`boolean: false`) - Add an annotation to the server configmap and the statefulset pods, `vaultproject.io/config-checksum`, that is a hash of the Vault configuration. This can be used together with an OnDelete deployment strategy to help identify which pods still need to be deleted during a deployment to pick up any configuration changes. - `service` - Values that configure the Kubernetes service created for Vault. These options are also used for the `active` and `standby` services when [`ha`](#ha) is enabled. @@ -1090,7 +1090,7 @@ and consider if they're appropriate for your deployment. - `repository` (`string: "hashicorp/vault-csi-provider"`) - The name of the Docker image for the Vault CSI Provider. - - `tag` (`string: "1.4.2"`) - The tag of the Docker image for the Vault CSI Provider.. **This should be pinned to a specific version when running in production.** Otherwise, other changes to the chart may inadvertently upgrade your CSI provider. + - `tag` (`string: "1.4.3"`) - The tag of the Docker image for the Vault CSI Provider.. **This should be pinned to a specific version when running in production.** Otherwise, other changes to the chart may inadvertently upgrade your CSI provider. - `pullPolicy` (`string: "IfNotPresent"`) - The pull policy for container images. The default pull policy is `IfNotPresent` which causes the Kubelet to skip pulling an image if it already exists locally. @@ -1239,7 +1239,7 @@ and consider if they're appropriate for your deployment. - `repository` (`string: "hashicorp/vault"`) - The name of the Docker image for the Vault Agent sidecar. This should be set to the official Vault Docker image. - - `tag` (`string: "1.16.1"`) - The tag of the Vault Docker image to use for the Vault Agent Sidecar. + - `tag` (`string: "1.17.2"`) - The tag of the Vault Docker image to use for the Vault Agent Sidecar. 
- `logFormat` (`string: "standard"`) - - `logLevel` (`string: "info"`) - @@ -1258,7 +1258,9 @@ and consider if they're appropriate for your deployment. the `telemetry {}` stanza in the Vault configuration. See the [telemetry](/vault/docs/configuration/telemetry) [docs](/vault/docs/internals/telemetry) for more on the Vault configuration. - Currently, this chart does not support authenticating to Vault's metrics endpoint, so the following `telemetry {}` block must be included in the `listener "tcp" {}` stanza of the Vault configuration: + If authorization is not set for authenticating to Vault's metrics endpoint, + the following Vault server `telemetry{}` config must be included in the + `listener "tcp"{}` stanza of the Vault configuration: ```yaml listener "tcp" { @@ -1299,6 +1301,31 @@ and consider if they're appropriate for your deployment. - `scrapeTimeout` (`string: "10s"`) - Timeout for Prometheus scrapes. + - `tlsConfig` (`dictionary: {}`) - tlsConfig used for scraping the Vault metrics API. See the + prometheus [API + reference](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.TLSConfig) + for more details. + + ```yaml + tlsConfig: + ca: + secret: + name: vault-metrics-client + key: ca.crt + ``` + + - `authorization` (`dictionary: {}`) - Authorization used for scraping the Vault metrics API. + See the prometheus [API + reference](https://prometheus-operator.dev/docs/api-reference/api/#monitoring.coreos.com/v1.SafeAuthorization) + for more details. + + ```yaml + authorization: + credentials: + name: vault-metrics-client + key: token + ``` + - `prometheusRules` - Values that configure Prometheus rules. - `enabled` (`boolean: false`) - Deploy the PrometheusRule custom resource for AlertManager-based diff --git a/website/content/docs/platform/k8s/helm/enterprise.mdx b/website/content/docs/platform/k8s/helm/enterprise.mdx index 0cb478a1fe18..cd2520c9d327 100644 --- a/website/content/docs/platform/k8s/helm/enterprise.mdx +++ b/website/content/docs/platform/k8s/helm/enterprise.mdx @@ -33,7 +33,7 @@ In your chart overrides, set the values of [`server.image`](/vault/docs/platform server: image: repository: hashicorp/vault-enterprise - tag: 1.16.1-ent + tag: 1.17.2-ent enterpriseLicense: secretName: vault-ent-license ``` diff --git a/website/content/docs/platform/k8s/helm/examples/enterprise-dr-with-raft.mdx b/website/content/docs/platform/k8s/helm/examples/enterprise-dr-with-raft.mdx index 31aad1145aae..256a687e8b9c 100644 --- a/website/content/docs/platform/k8s/helm/examples/enterprise-dr-with-raft.mdx +++ b/website/content/docs/platform/k8s/helm/examples/enterprise-dr-with-raft.mdx @@ -23,7 +23,7 @@ First, create the primary cluster: ```shell helm install vault-primary hashicorp/vault \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.16.1-ent' \ + --set='server.image.tag=1.17.2-ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' ``` @@ -75,7 +75,7 @@ disaster recovery replication. 
```shell helm install vault-secondary hashicorp/vault \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.16.1-ent' \ + --set='server.image.tag=1.17.2-ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' ``` diff --git a/website/content/docs/platform/k8s/helm/examples/enterprise-perf-with-raft.mdx b/website/content/docs/platform/k8s/helm/examples/enterprise-perf-with-raft.mdx index 5acd396fe001..78b49ad36526 100644 --- a/website/content/docs/platform/k8s/helm/examples/enterprise-perf-with-raft.mdx +++ b/website/content/docs/platform/k8s/helm/examples/enterprise-perf-with-raft.mdx @@ -23,7 +23,7 @@ First, create the primary cluster: ```shell helm install vault-primary hashicorp/vault \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.16.1-ent' \ + --set='server.image.tag=1.17.2-ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' ``` @@ -74,7 +74,7 @@ With the primary cluster created, next create a secondary cluster. ```shell helm install vault-secondary hashicorp/vault \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.16.1-ent' \ + --set='server.image.tag=1.17.2-ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' ``` diff --git a/website/content/docs/platform/k8s/helm/examples/enterprise-with-raft.mdx b/website/content/docs/platform/k8s/helm/examples/enterprise-with-raft.mdx index c6e3440043a0..fa84279e1621 100644 --- a/website/content/docs/platform/k8s/helm/examples/enterprise-with-raft.mdx +++ b/website/content/docs/platform/k8s/helm/examples/enterprise-with-raft.mdx @@ -15,7 +15,7 @@ Integrated Storage (raft) can be enabled using the `server.ha.raft.enabled` valu ```shell helm install vault hashicorp/vault \ --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.16.1-ent' \ + --set='server.image.tag=1.17.2-ent' \ --set='server.ha.enabled=true' \ --set='server.ha.raft.enabled=true' ``` diff --git a/website/content/docs/platform/k8s/helm/run.mdx b/website/content/docs/platform/k8s/helm/run.mdx index 49cd15362954..95184caaf18b 100644 --- a/website/content/docs/platform/k8s/helm/run.mdx +++ b/website/content/docs/platform/k8s/helm/run.mdx @@ -409,14 +409,14 @@ Next, list the Helm versions and choose the desired version to install. ```bash $ helm search repo hashicorp/vault NAME CHART VERSION APP VERSION DESCRIPTION -hashicorp/vault 0.28.0 1.16.1 Official HashiCorp Vault Chart +hashicorp/vault 0.28.1 1.17.2 Official HashiCorp Vault Chart ``` Next, test the upgrade with `--dry-run` first to verify the changes sent to the Kubernetes cluster. ```shell-session -$ helm upgrade vault hashicorp/vault --version=0.28.0 \ +$ helm upgrade vault hashicorp/vault --version=0.28.1 \ --set='server.image.repository=vault' \ --set='server.image.tag=123.456' \ --dry-run diff --git a/website/content/docs/platform/k8s/injector/annotations.mdx b/website/content/docs/platform/k8s/injector/annotations.mdx index 5e5514905222..1be2e3f577f7 100644 --- a/website/content/docs/platform/k8s/injector/annotations.mdx +++ b/website/content/docs/platform/k8s/injector/annotations.mdx @@ -28,7 +28,7 @@ them, optional commands to run, etc. - `vault.hashicorp.com/agent-image` - name of the Vault docker image to use. This value overrides the default image configured in the injector and is usually - not needed. Defaults to `hashicorp/vault:1.16.1`. + not needed. Defaults to `hashicorp/vault:1.17.2`. 
- `vault.hashicorp.com/agent-init-first` - configures the pod to run the Vault Agent init container first if `true` (last if `false`). This is useful when other init diff --git a/website/content/docs/platform/k8s/vso/api-reference.mdx b/website/content/docs/platform/k8s/vso/api-reference.mdx index cb2ccf98eb10..96c839c6bb8b 100644 --- a/website/content/docs/platform/k8s/vso/api-reference.mdx +++ b/website/content/docs/platform/k8s/vso/api-reference.mdx @@ -7,7 +7,7 @@ description: >- # API Reference @@ -27,6 +27,8 @@ Package v1beta1 contains API Schema definitions for the secrets v1beta1 API grou - [SecretTransformation](#secrettransformation) - [SecretTransformationList](#secrettransformationlist) - [VaultAuth](#vaultauth) +- [VaultAuthGlobal](#vaultauthglobal) +- [VaultAuthGlobalList](#vaultauthgloballist) - [VaultAuthList](#vaultauthlist) - [VaultConnection](#vaultconnection) - [VaultConnectionList](#vaultconnectionlist) @@ -200,6 +202,25 @@ _Appears in:_ +#### MergeStrategy + + + +MergeStrategy provides the configuration for merging HTTP headers and +parameters from the referring VaultAuth resource and its VaultAuthGlobal +resource. + + + +_Appears in:_ +- [VaultAuthGlobalRef](#vaultauthglobalref) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `headers` _string_ | Headers configures the merge strategy for HTTP headers that are included in
all Vault requests. Choices are `union`, `replace`, or `none`.

If `union` is set, the headers from the VaultAuthGlobal and VaultAuth
resources are merged. The headers from the VaultAuth always take precedence.

If `replace` is set, the first non-empty set of headers is taken, in order, from:
the VaultAuth resource, the VaultAuthGlobal auth method configuration, and the VaultAuthGlobal default headers.

If `none` is set, the headers from the
VaultAuthGlobal resource are ignored and only the headers from the VaultAuth
resource are used. The default is `none`. | | Enum: [union replace none]
| +| `params` _string_ | Params configures the merge strategy for HTTP parameters that are included in
all Vault requests. Choices are `union`, `replace`, or `none`.

If `union` is set, the parameters from the VaultAuthGlobal and VaultAuth
resources are merged. The parameters from the VaultAuth always take
precedence.

If `replace` is set, the first non-empty set of parameters is taken, in order, from:
the VaultAuth resource, the VaultAuthGlobal auth method configuration, and the VaultAuthGlobal default parameters.

If `none` is set, the parameters from the VaultAuthGlobal resource are ignored
and only the parameters from the VaultAuth resource are used. The default is
`none`. | | Enum: [union replace none]
| + + #### RolloutRestartTarget @@ -321,6 +342,22 @@ _Appears in:_ | `keyName` _string_ | KeyName to use for encrypt/decrypt operations via Vault Transit. | | | +#### SyncConfig + + + +SyncConfig configures sync behavior from Vault to VSO + + + +_Appears in:_ +- [VaultStaticSecretSpec](#vaultstaticsecretspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `instantUpdates` _boolean_ | InstantUpdates is a flag to indicate that event-driven updates are
enabled for this VaultStaticSecret | | | + + #### Template @@ -431,6 +468,7 @@ authenticate to Vault. _Appears in:_ +- [VaultAuthGlobalConfigAWS](#vaultauthglobalconfigaws) - [VaultAuthSpec](#vaultauthspec) | Field | Description | Default | Validation | @@ -455,6 +493,7 @@ Vault via an AppRole AuthMethod. _Appears in:_ +- [VaultAuthGlobalConfigAppRole](#vaultauthglobalconfigapprole) - [VaultAuthSpec](#vaultauthspec) | Field | Description | Default | Validation | @@ -473,6 +512,7 @@ authenticating to Vault via a GCP AuthMethod, using workload identity _Appears in:_ +- [VaultAuthGlobalConfigGCP](#vaultauthglobalconfiggcp) - [VaultAuthSpec](#vaultauthspec) | Field | Description | Default | Validation | @@ -493,6 +533,7 @@ VaultAuthConfigJWT provides VaultAuth configuration options needed for authentic _Appears in:_ +- [VaultAuthGlobalConfigJWT](#vaultauthglobalconfigjwt) - [VaultAuthSpec](#vaultauthspec) | Field | Description | Default | Validation | @@ -513,6 +554,7 @@ VaultAuthConfigKubernetes provides VaultAuth configuration options needed for au _Appears in:_ +- [VaultAuthGlobalConfigKubernetes](#vaultauthglobalconfigkubernetes) - [VaultAuthSpec](#vaultauthspec) | Field | Description | Default | Validation | @@ -523,6 +565,213 @@ _Appears in:_ | `tokenExpirationSeconds` _integer_ | TokenExpirationSeconds to set the ServiceAccount token. | 600 | Minimum: 600
| +#### VaultAuthGlobal + + + +VaultAuthGlobal is the Schema for the vaultauthglobals API + + + +_Appears in:_ +- [VaultAuthGlobalList](#vaultauthgloballist) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `secrets.hashicorp.com/v1beta1` | | | +| `kind` _string_ | `VaultAuthGlobal` | | | +| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `spec` _[VaultAuthGlobalSpec](#vaultauthglobalspec)_ | | | | + + +#### VaultAuthGlobalConfigAWS + + + + + + + +_Appears in:_ +- [VaultAuthGlobalSpec](#vaultauthglobalspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `role` _string_ | Vault role to use for authenticating | | | +| `region` _string_ | AWS Region to use for signing the authentication request | | | +| `headerValue` _string_ | The Vault header value to include in the STS signing request | | | +| `sessionName` _string_ | The role session name to use when creating a webidentity provider | | | +| `stsEndpoint` _string_ | The STS endpoint to use; if not set will use the default | | | +| `iamEndpoint` _string_ | The IAM endpoint to use; if not set will use the default | | | +| `secretRef` _string_ | SecretRef is the name of a Kubernetes Secret in the consumer's (VDS/VSS/PKI) namespace
which holds credentials for AWS. Expected keys include `access_key_id`, `secret_access_key`,
`session_token` | | | +| `irsaServiceAccount` _string_ | IRSAServiceAccount name to use with IAM Roles for Service Accounts
(IRSA), and should be annotated with "eks.amazonaws.com/role-arn". This
ServiceAccount will be checked for other EKS annotations:
eks.amazonaws.com/audience and eks.amazonaws.com/token-expiration | | | +| `namespace` _string_ | Namespace to auth to in Vault | | | +| `mount` _string_ | Mount to use when authenticating to auth method. | | | +| `params` _object (keys:string, values:string)_ | Params to use when authenticating to Vault | | | +| `headers` _object (keys:string, values:string)_ | Headers to be included in all Vault requests. | | | + + +#### VaultAuthGlobalConfigAppRole + + + + + + + +_Appears in:_ +- [VaultAuthGlobalSpec](#vaultauthglobalspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `roleId` _string_ | RoleID of the AppRole Role to use for authenticating to Vault. | | | +| `secretRef` _string_ | SecretRef is the name of a Kubernetes secret in the consumer's (VDS/VSS/PKI) namespace which
provides the AppRole Role's SecretID. The secret must have a key named `id` which holds the
AppRole Role's secretID. | | | +| `namespace` _string_ | Namespace to auth to in Vault | | | +| `mount` _string_ | Mount to use when authenticating to auth method. | | | +| `params` _object (keys:string, values:string)_ | Params to use when authenticating to Vault | | | +| `headers` _object (keys:string, values:string)_ | Headers to be included in all Vault requests. | | | + + +#### VaultAuthGlobalConfigGCP + + + + + + + +_Appears in:_ +- [VaultAuthGlobalSpec](#vaultauthglobalspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `role` _string_ | Vault role to use for authenticating | | | +| `workloadIdentityServiceAccount` _string_ | WorkloadIdentityServiceAccount is the name of a Kubernetes service
account (in the same Kubernetes namespace as the Vault*Secret referencing
this resource) which has been configured for workload identity in GKE.
Should be annotated with "iam.gke.io/gcp-service-account". | | | +| `region` _string_ | GCP Region of the GKE cluster's identity provider. Defaults to the region
returned from the operator pod's local metadata server. | | | +| `clusterName` _string_ | GKE cluster name. Defaults to the cluster-name returned from the operator
pod's local metadata server. | | | +| `projectID` _string_ | GCP project ID. Defaults to the project-id returned from the operator
pod's local metadata server. | | | +| `namespace` _string_ | Namespace to auth to in Vault | | | +| `mount` _string_ | Mount to use when authenticating to auth method. | | | +| `params` _object (keys:string, values:string)_ | Params to use when authenticating to Vault | | | +| `headers` _object (keys:string, values:string)_ | Headers to be included in all Vault requests. | | | + + +#### VaultAuthGlobalConfigJWT + + + + + + + +_Appears in:_ +- [VaultAuthGlobalSpec](#vaultauthglobalspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `role` _string_ | Role to use for authenticating to Vault. | | | +| `secretRef` _string_ | SecretRef is the name of a Kubernetes secret in the consumer's (VDS/VSS/PKI) namespace which
provides the JWT token to authenticate to Vault's JWT authentication backend. The secret must
have a key named `jwt` which holds the JWT token. | | | +| `serviceAccount` _string_ | ServiceAccount to use when creating a ServiceAccount token to authenticate to Vault's
JWT authentication backend. | | | +| `audiences` _string array_ | TokenAudiences to include in the ServiceAccount token. | | | +| `tokenExpirationSeconds` _integer_ | TokenExpirationSeconds to set the ServiceAccount token. | 600 | Minimum: 600
| +| `namespace` _string_ | Namespace to auth to in Vault | | | +| `mount` _string_ | Mount to use when authenticating to auth method. | | | +| `params` _object (keys:string, values:string)_ | Params to use when authenticating to Vault | | | +| `headers` _object (keys:string, values:string)_ | Headers to be included in all Vault requests. | | | + + +#### VaultAuthGlobalConfigKubernetes + + + + + + + +_Appears in:_ +- [VaultAuthGlobalSpec](#vaultauthglobalspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `role` _string_ | Role to use for authenticating to Vault. | | | +| `serviceAccount` _string_ | ServiceAccount to use when authenticating to Vault's
authentication backend. This must reside in the consuming secret's (VDS/VSS/PKI) namespace. | | | +| `audiences` _string array_ | TokenAudiences to include in the ServiceAccount token. | | | +| `tokenExpirationSeconds` _integer_ | TokenExpirationSeconds to set the ServiceAccount token. | 600 | Minimum: 600
| +| `namespace` _string_ | Namespace to auth to in Vault | | | +| `mount` _string_ | Mount to use when authenticating to auth method. | | | +| `params` _object (keys:string, values:string)_ | Params to use when authenticating to Vault | | | +| `headers` _object (keys:string, values:string)_ | Headers to be included in all Vault requests. | | | + + +#### VaultAuthGlobalList + + + +VaultAuthGlobalList contains a list of VaultAuthGlobal + + + + + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `apiVersion` _string_ | `secrets.hashicorp.com/v1beta1` | | | +| `kind` _string_ | `VaultAuthGlobalList` | | | +| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | | +| `items` _[VaultAuthGlobal](#vaultauthglobal) array_ | | | | + + +#### VaultAuthGlobalRef + + + +VaultAuthGlobalRef is a reference to a VaultAuthGlobal resource. A referring +VaultAuth resource can use the VaultAuthGlobal resource to share common +configuration across multiple VaultAuth resources. The VaultAuthGlobal +resource is used to store global configuration for VaultAuth resources. + + + +_Appears in:_ +- [VaultAuthSpec](#vaultauthspec) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `name` _string_ | Name of the VaultAuthGlobal resource. | | Pattern: `^([a-z0-9.-]{1,253})$`
| +| `namespace` _string_ | Namespace of the VaultAuthGlobal resource. If not provided, the namespace of
the referring VaultAuth resource is used. | | Pattern: `^([a-z0-9.-]{1,253})$`
| +| `mergeStrategy` _[MergeStrategy](#mergestrategy)_ | MergeStrategy configures the merge strategy for HTTP headers and parameters
that are included in all Vault authentication requests. | | | +| `allowDefault` _boolean_ | AllowDefault when set to true will use the default VaultAuthGlobal resource
as the default if Name is not set. The 'allow-default-globals' option must be
set on the operator's '-global-vault-auth-options' flag

The default VaultAuthGlobal search is conditional.
When a ref Namespace is set, the search for the default
VaultAuthGlobal resource is constrained to that namespace.
Otherwise, the search order is:
1. The default VaultAuthGlobal resource in the referring VaultAuth resource's
namespace.
2. The default VaultAuthGlobal resource in the Operator's namespace. | | | + + +#### VaultAuthGlobalSpec + + + +VaultAuthGlobalSpec defines the desired state of VaultAuthGlobal + + + +_Appears in:_ +- [VaultAuthGlobal](#vaultauthglobal) + +| Field | Description | Default | Validation | +| --- | --- | --- | --- | +| `allowedNamespaces` _string array_ | AllowedNamespaces Kubernetes Namespaces which are allow-listed for use with
this VaultAuthGlobal. This field allows administrators to customize which
Kubernetes namespaces are authorized to reference this resource. While Vault
will still enforce its own rules, this has the added configurability of
restricting which VaultAuthMethods can be used by which namespaces. Accepted
values: []{"*"} - wildcard, all namespaces. []{"a", "b"} - list of namespaces.
unset - disallow all namespaces except the Operator's and the referring
VaultAuthMethod's namespace; this is the default behavior. | | | +| `vaultConnectionRef` _string_ | VaultConnectionRef to the VaultConnection resource, can be prefixed with a namespace,
eg: `namespaceA/vaultConnectionRefB`. If no namespace prefix is provided it will default to
namespace of the VaultConnection CR. If no value is specified for VaultConnectionRef the
Operator will default to the `default` VaultConnection, configured in the operator's namespace. | | | +| `defaultVaultNamespace` _string_ | DefaultVaultNamespace to auth to in Vault, if not specified the namespace of the auth
method will be used. This can be used as a default Vault namespace for all
auth methods. | | | +| `defaultAuthMethod` _string_ | DefaultAuthMethod to use when authenticating to Vault. | | Enum: [kubernetes jwt appRole aws gcp]
| +| `defaultMount` _string_ | DefaultMount to use when authenticating to auth method. If not specified the mount of
the auth method configured in Vault will be used. | | | +| `params` _object (keys:string, values:string)_ | DefaultParams to use when authenticating to Vault | | | +| `headers` _object (keys:string, values:string)_ | DefaultHeaders to be included in all Vault requests. | | | +| `kubernetes` _[VaultAuthGlobalConfigKubernetes](#vaultauthglobalconfigkubernetes)_ | Kubernetes specific auth configuration, requires that the Method be set to `kubernetes`. | | | +| `appRole` _[VaultAuthGlobalConfigAppRole](#vaultauthglobalconfigapprole)_ | AppRole specific auth configuration, requires that the Method be set to `appRole`. | | | +| `jwt` _[VaultAuthGlobalConfigJWT](#vaultauthglobalconfigjwt)_ | JWT specific auth configuration, requires that the Method be set to `jwt`. | | | +| `aws` _[VaultAuthGlobalConfigAWS](#vaultauthglobalconfigaws)_ | AWS specific auth configuration, requires that Method be set to `aws`. | | | +| `gcp` _[VaultAuthGlobalConfigGCP](#vaultauthglobalconfiggcp)_ | GCP specific auth configuration, requires that Method be set to `gcp`. | | | + + + + #### VaultAuthList @@ -555,6 +804,7 @@ _Appears in:_ | Field | Description | Default | Validation | | --- | --- | --- | --- | | `vaultConnectionRef` _string_ | VaultConnectionRef to the VaultConnection resource, can be prefixed with a namespace,
eg: `namespaceA/vaultConnectionRefB`. If no namespace prefix is provided it will default to
namespace of the VaultConnection CR. If no value is specified for VaultConnectionRef the
Operator will default to the `default` VaultConnection, configured in the operator's namespace. | | | +| `vaultAuthGlobalRef` _[VaultAuthGlobalRef](#vaultauthglobalref)_ | VaultAuthGlobalRef. | | | | `namespace` _string_ | Namespace to auth to in Vault | | | | `allowedNamespaces` _string array_ | AllowedNamespaces Kubernetes Namespaces which are allow-listed for use with this AuthMethod.
This field allows administrators to customize which Kubernetes namespaces are authorized to
use this AuthMethod. While Vault will still enforce its own rules, this has the added
configurability of restricting which VaultAuthMethods can be used by which namespaces.
Accepted values:
[]{"*"} - wildcard, all namespaces.
[]{"a", "b"} - list of namespaces.
unset - disallow all namespaces except the Operator's and the VaultAuthMethod's namespace; this
is the default behavior. | | | | `method` _string_ | Method to use when authenticating to Vault. | | Enum: [kubernetes jwt appRole aws gcp]
| @@ -887,3 +1137,4 @@ _Appears in:_ | `hmacSecretData` _boolean_ | HMACSecretData determines whether the Operator computes the
HMAC of the Secret's data. The MAC value will be stored in
the resource's Status.SecretMac field, and will be used for drift detection
and during incoming Vault secret comparison.
Enabling this feature is recommended to ensure that Secret's data stays consistent with Vault. | true | | | `rolloutRestartTargets` _[RolloutRestartTarget](#rolloutrestarttarget) array_ | RolloutRestartTargets should be configured whenever the application(s) consuming the Vault secret does
not support dynamically reloading a rotated secret.
In that case, one or more RolloutRestartTarget(s) can be configured here. The Operator will
trigger a "rollout-restart" for each target whenever the Vault secret changes between reconciliation events.
All configured targets will be ignored if HMACSecretData is set to false.
See RolloutRestartTarget for more details. | | | | `destination` _[Destination](#destination)_ | Destination provides configuration necessary for syncing the Vault secret to Kubernetes. | | | +| `syncConfig` _[SyncConfig](#syncconfig)_ | SyncConfig configures sync behavior from Vault to VSO | | | diff --git a/website/content/docs/platform/k8s/vso/helm.mdx b/website/content/docs/platform/k8s/vso/helm.mdx index ac29f51ca49b..3aa610d23488 100644 --- a/website/content/docs/platform/k8s/vso/helm.mdx +++ b/website/content/docs/platform/k8s/vso/helm.mdx @@ -23,6 +23,7 @@ Use these links to navigate to a particular top-level stanza. - [`defaultVaultConnection`](#h-defaultvaultconnection) - [`defaultAuthMethod`](#h-defaultauthmethod) - [`telemetry`](#h-telemetry) +- [`hooks`](#h-hooks) - [`tests`](#h-tests) ## All Values @@ -165,7 +166,7 @@ Use these links to navigate to a particular top-level stanza. - `repository` ((#v-controller-manager-image-repository)) (`string: hashicorp/vault-secrets-operator`) - - `tag` ((#v-controller-manager-image-tag)) (`string: 0.7.1`) + - `tag` ((#v-controller-manager-image-tag)) (`string: 0.8.0`) - `logging` ((#v-controller-manager-logging)) - logging @@ -196,11 +197,19 @@ Use these links to navigate to a particular top-level stanza. - `maxInterval` ((#v-controller-manager-backoffonsecretsourceerror-maxinterval)) (`duration: 60s`) - Maximum interval between retries. - - `maxElapsedTime` ((#v-controller-manager-backoffonsecretsourceerror-maxelapsedtime)) (`duration: 0s`) - Maximum elapsed time before giving up. + - `maxElapsedTime` ((#v-controller-manager-backoffonsecretsourceerror-maxelapsedtime)) (`duration: 0s`) - Maximum elapsed time without a successful sync from the secret's source. + It's important to note that setting this option to anything other than + its default will result in the secret sync no longer being retried after + reaching the max elapsed time. - - `randomizationFactor` ((#v-controller-manager-backoffonsecretsourceerror-randomizationfactor)) (`float: 0.5`) - Randomization factor to add jitter to the interval between retries. + - `randomizationFactor` ((#v-controller-manager-backoffonsecretsourceerror-randomizationfactor)) (`float: 0.5`) - Randomization factor randomizes the backoff interval between retries. + This helps to spread out the retries to avoid a thundering herd. + If the value is 0, then the backoff interval will not be randomized. + It is recommended to set this to a value that is greater than 0. - - `multiplier` ((#v-controller-manager-backoffonsecretsourceerror-multiplier)) (`float: 1.5`) - Sets the multiplier for increasing the interval between retries. + - `multiplier` ((#v-controller-manager-backoffonsecretsourceerror-multiplier)) (`float: 1.5`) - Sets the multiplier that is used to increase the backoff interval between retries. + This value should always be set to a value greater than 0. + The value must be greater than zero. - `clientCache` ((#v-controller-manager-clientcache)) - Configures the client cache which is used by the controller to cache (and potentially persist) vault tokens that are the result of using the VaultAuthMethod. This enables re-use of Vault Tokens @@ -581,6 +590,27 @@ Use these links to navigate to a particular top-level stanza. 
headers: X-vault-something1: "foo" + - `vaultAuthGlobalRef` ((#v-defaultauthmethod-vaultauthglobalref)) - VaultAuthGlobalRef + + - `enabled` ((#v-defaultauthmethod-vaultauthglobalref-enabled)) (`boolean: false`) - toggles the inclusion of the VaultAuthGlobal configuration in the + default VaultAuth CR. + + - `name` ((#v-defaultauthmethod-vaultauthglobalref-name)) (`string: ""`) - Name of the VaultAuthGlobal CR to reference. + + - `namespace` ((#v-defaultauthmethod-vaultauthglobalref-namespace)) (`string: ""`) - Namespace of the VaultAuthGlobal CR to reference. + + - `allowDefault` ((#v-defaultauthmethod-vaultauthglobalref-allowdefault)) (`boolean: ""`) - allow default globals + + - `mergeStrategy` ((#v-defaultauthmethod-vaultauthglobalref-mergestrategy)) + + - `headers` ((#v-defaultauthmethod-vaultauthglobalref-mergestrategy-headers)) (`string: none`) - merge strategy for headers + Valid values are: "replace", "merge", "none" + Default: "replace" + + - `params` ((#v-defaultauthmethod-vaultauthglobalref-mergestrategy-params)) (`string: none`) - merge strategy for params + Valid values are: "replace", "merge", "none" + Default: "replace" + ### telemetry ((#h-telemetry)) - `telemetry` ((#v-telemetry)) - Configures a Prometheus ServiceMonitor @@ -615,6 +645,35 @@ Use these links to navigate to a particular top-level stanza. - `scrapeTimeout` ((#v-telemetry-servicemonitor-scrapetimeout)) (`string: 10s`) - Timeout for Prometheus scrapes +### hooks ((#h-hooks)) + +- `hooks` ((#v-hooks)) - Configure the behaviour of Helm hooks. + + - `resources` ((#v-hooks-resources)) - Resources common to all hooks. + + - `limits` ((#v-hooks-resources-limits)) + + - `cpu` ((#v-hooks-resources-limits-cpu)) (`string: 500m`) + + - `memory` ((#v-hooks-resources-limits-memory)) (`string: 128Mi`) + + - `requests` ((#v-hooks-resources-requests)) + + - `cpu` ((#v-hooks-resources-requests-cpu)) (`string: 10m`) + + - `memory` ((#v-hooks-resources-requests-memory)) (`string: 64Mi`) + + - `upgradeCRDs` ((#v-hooks-upgradecrds)) - Configure the Helm pre-upgrade hook that handles custom resource definition (CRD) upgrades. + + - `enabled` ((#v-hooks-upgradecrds-enabled)) (`boolean: true`) - Set to true to automatically upgrade the CRDs. + Disabling this will require manual intervention to upgrade the CRDs, so it is recommended to + always leave it enabled. + + - `backoffLimit` ((#v-hooks-upgradecrds-backofflimit)) (`integer: 5`) - Limit the number of retries for the CRD upgrade. + + - `executionTimeout` ((#v-hooks-upgradecrds-executiontimeout)) (`string: 30s`) - Set the timeout for the CRD upgrade. The operation should typically take less than 5s + to complete. + ### tests ((#h-tests)) - `tests` ((#v-tests)) - # Used by unit tests, and will not be rendered except when using `helm template`, this can be safely ignored. diff --git a/website/content/docs/platform/k8s/vso/installation.mdx b/website/content/docs/platform/k8s/vso/installation.mdx index a2133d62e428..771d7018b144 100644 --- a/website/content/docs/platform/k8s/vso/installation.mdx +++ b/website/content/docs/platform/k8s/vso/installation.mdx @@ -4,6 +4,7 @@ page_title: Vault Secrets Operator Installation description: >- The Vault Secrets Operator can be installed using Helm. --- +@include 'vso/common-links.mdx' # Installing and upgrading the Vault Secrets Operator @@ -17,7 +18,7 @@ description: >- [Install Helm](https://helm.sh/docs/intro/install) before beginning. 
-The [Vault Secrets Operator Helm chart](/vault/docs/platform/k8s/vso/helm) is the recommended way of +The [Helm chart][helm] is the recommended way of installing and configuring the Vault Secrets Operator. To install a new instance of the Vault Secrets Operator, first add the @@ -31,13 +32,13 @@ $ helm repo add hashicorp https://helm.releases.hashicorp.com ```shell-session $ helm search repo hashicorp/vault-secrets-operator NAME CHART VERSION APP VERSION DESCRIPTION -hashicorp/vault-secrets-operator 0.7.1 0.7.1 Official HashiCorp Vault Secrets Operator Chart +hashicorp/vault-secrets-operator 0.8.0 0.8.0 Official HashiCorp Vault Secrets Operator Chart ``` Then install the Operator: ```shell-session -$ helm install --version 0.7.1 --create-namespace --namespace vault-secrets-operator vault-secrets-operator hashicorp/vault-secrets-operator +$ helm install --version 0.8.0 --create-namespace --namespace vault-secrets-operator vault-secrets-operator hashicorp/vault-secrets-operator ``` ## Upgrading using Helm @@ -54,57 +55,22 @@ Hang tight while we grab the latest from your chart repositories... Update Complete. ⎈Happy Helming!⎈ ``` - - You must update all CRDs manually before upgrading VSO. - Refer to Updating CRDs. - - -To upgrade your VSO release, replace `` with the VSO version you are upgrading to: -```shell-session -$ helm show crds --version hashicorp/vault-secrets-operator | kubectl apply -f - -$ helm upgrade --version --namespace vault-secrets-operator vault-secrets-operator hashicorp/vault-secrets-operator -``` - -For example, if you are upgrading to VSO 0.7.1: -```shell-session -$ helm show crds --version 0.7.1 hashicorp/vault-secrets-operator | kubectl apply -f - -$ helm upgrade --version 0.7.1 --namespace vault-secrets-operator vault-secrets-operator hashicorp/vault-secrets-operator -``` - ## Updating CRDs when using Helm -You must update the CRDs for VSO manually **before** you upgrade the - operator when the operator is managed by Helm. + -**Any `kubectl` warnings related to `last-applied-configuration` should be safe to ignore.** + As of VSO 0.8.0, VSO will automatically update its CRDs. + The manual upgrade step [Updating CRDs](#updating-crds-when-using-helm-prior-to-vso-0-8-0) below is no longer required when + upgrading to VSO 0.8.0+. -To update the VSO CRDs, replace `` with the VSO version you are upgrading to: -```shell-session -$ helm show crds --version hashicorp/vault-secrets-operator | kubectl apply -f - -``` - -For example, if you are upgrading to VSO 0.7.1: -```shell-session -$ helm show crds --version 0.7.1 hashicorp/vault-secrets-operator | kubectl apply -f - + -customresourcedefinition.apiextensions.k8s.io/hcpauths.secrets.hashicorp.com created -customresourcedefinition.apiextensions.k8s.io/hcpvaultsecretsapps.secrets.hashicorp.com created -Warning: resource customresourcedefinitions/vaultauths.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. -customresourcedefinition.apiextensions.k8s.io/vaultauths.secrets.hashicorp.com configured -Warning: resource customresourcedefinitions/vaultconnections.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. 
kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. -customresourcedefinition.apiextensions.k8s.io/vaultconnections.secrets.hashicorp.com configured -Warning: resource customresourcedefinitions/vaultdynamicsecrets.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. -customresourcedefinition.apiextensions.k8s.io/vaultdynamicsecrets.secrets.hashicorp.com configured -Warning: resource customresourcedefinitions/vaultpkisecrets.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. -customresourcedefinition.apiextensions.k8s.io/vaultpkisecrets.secrets.hashicorp.com configured -Warning: resource customresourcedefinitions/vaultstaticsecrets.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. -customresourcedefinition.apiextensions.k8s.io/vaultstaticsecrets.secrets.hashicorp.com configured -``` +The VSO Helm chart will automatically upgrade the CRDs to match the VSO version being deployed. +There should be no need to manually update the CRDs prior to upgrading VSO using Helm. ## Chart values -Refer to the [VSO Helm chart](/vault/docs/platform/k8s/vso/helm) - overview for a full list of supported chart values. +Refer to the [Helm chart][helm] overview for a full list of supported chart values. ## Installation using Kustomize @@ -112,9 +78,9 @@ You can install and update your installation using `kustomize` which allows you To install using Kustomize, download and untar/unzip the latest release from the [Releases Page](https://github.com/hashicorp/vault-secrets-operator/releases). ```shell-session -$ wget -q https://github.com/hashicorp/vault-secrets-operator/archive/refs/tags/v0.7.1.tar.gz -$ tar -zxf v0.7.1.tar.gz -$ cd vault-secrets-operator-0.7.1/ +$ wget -q https://github.com/hashicorp/vault-secrets-operator/archive/refs/tags/v0.8.0.tar.gz +$ tar -zxf v0.8.0.tar.gz +$ cd vault-secrets-operator-0.8.0/ ``` Next install using `kustomize build`: @@ -162,3 +128,43 @@ vault-secrets-operator-system vault-secrets-operator-controller-manager-56754d Upgrading using Kustomize is similar to installation: simply download the new release from github and follow the same steps as outlined in [Installation using Kustomize](#installation-using-kustomize). No additional steps are required to update the CRDs. + +## Legacy notes + +The following notes provide guidance for installing/upgrading older versions of VSO. + +### Updating CRDs when using Helm prior to VSO 0.8.0 + +This step can be skipped if you are upgrading to VSO 0.8.0 or later. + + + You must update all CRDs manually before upgrading VSO to a version prior to 0.8.0. 
+ + +You must update the CRDs for VSO manually **before** you upgrade the +operator when the operator is managed by Helm. + +**Any `kubectl` warnings related to `last-applied-configuration` should be safe to ignore.** + +To update the VSO CRDs, replace `` with the VSO version you are upgrading to: +```shell-session +$ helm show crds --version hashicorp/vault-secrets-operator | kubectl apply -f - +``` + +For example, if you are upgrading to VSO 0.7.1: +```shell-session +$ helm show crds --version 0.7.1 hashicorp/vault-secrets-operator | kubectl apply -f - + +customresourcedefinition.apiextensions.k8s.io/hcpauths.secrets.hashicorp.com created +customresourcedefinition.apiextensions.k8s.io/hcpvaultsecretsapps.secrets.hashicorp.com created +Warning: resource customresourcedefinitions/vaultauths.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. +customresourcedefinition.apiextensions.k8s.io/vaultauths.secrets.hashicorp.com configured +Warning: resource customresourcedefinitions/vaultconnections.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. +customresourcedefinition.apiextensions.k8s.io/vaultconnections.secrets.hashicorp.com configured +Warning: resource customresourcedefinitions/vaultdynamicsecrets.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. +customresourcedefinition.apiextensions.k8s.io/vaultdynamicsecrets.secrets.hashicorp.com configured +Warning: resource customresourcedefinitions/vaultpkisecrets.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. +customresourcedefinition.apiextensions.k8s.io/vaultpkisecrets.secrets.hashicorp.com configured +Warning: resource customresourcedefinitions/vaultstaticsecrets.secrets.hashicorp.com is missing the kubectl.kubernetes.io/last-applied-configuration annotation which is required by kubectl apply. kubectl apply should only be used on resources created declaratively by either kubectl create --save-config or kubectl apply. The missing annotation will be patched automatically. 
+customresourcedefinition.apiextensions.k8s.io/vaultstaticsecrets.secrets.hashicorp.com configured +``` diff --git a/website/content/docs/platform/k8s/vso/openshift.mdx b/website/content/docs/platform/k8s/vso/openshift.mdx index 165c5f0c957a..610c3aab256b 100644 --- a/website/content/docs/platform/k8s/vso/openshift.mdx +++ b/website/content/docs/platform/k8s/vso/openshift.mdx @@ -26,13 +26,13 @@ Set the following environment variables [on the subscription](https://access.red ## Helm chart -The Vault Secrets Operator may also be installed in OpenShift using the Helm chart. (See [Installation](/vault/docs/platform/k8s/vso/installation) for an overview of installation using the [Helm chart](/vault/docs/platform/k8s/vso/helm).) The examples below show example [values.yaml files](https://helm.sh/docs/chart_template_guide/values_files/) for each configuration, which would be used with `helm install` as below: +The Vault Secrets Operator may also be installed in OpenShift using the Helm chart. (See [Helm chart][helm].) The examples below show example [values.yaml files](https://helm.sh/docs/chart_template_guide/values_files/) for each configuration, which would be used with `helm install` as below: ```shell-session $ helm install vault-secrets-operator hashicorp/vault-secrets-operator \ --create-namespace \ --namespace vault-secrets-operator \ - --version 0.7.1 \ + --version 0.8.0 \ --values values.yaml ``` @@ -65,7 +65,7 @@ controller: manager: image: repository: registry.connect.redhat.com/hashicorp/vault-secrets-operator - tag: 0.7.1-ubi + tag: 0.8.0-ubi resources: limits: memory: 256Mi diff --git a/website/content/docs/platform/k8s/vso/sources/vault/auth/index.mdx b/website/content/docs/platform/k8s/vso/sources/vault/auth/index.mdx new file mode 100644 index 000000000000..2dd96d9e13c9 --- /dev/null +++ b/website/content/docs/platform/k8s/vso/sources/vault/auth/index.mdx @@ -0,0 +1,502 @@ +--- +layout: docs +page_title: 'Vault Secrets Operator: Vault authentication details' +description: >- + Authenticate to Vault with the Vault Secrets Operator. +--- + +@include 'vso/common-links.mdx' + +# Vault authentication in detail + +## Auth configuration + +The Vault Secrets Operator (VSO) relies on `VaultAuth` resources to authenticate with Vault. It relies on credential +providers to generate the credentials necessary for authentication. For example, when VSO authenticates to a kubernetes +auth backend, it generates a token using the Kubernetes service account configured in the VaultAuth resource's defined +kubernetes auth method. The service account must be configured in the Kubernetes namespace of the requesting resource. +Meaning, if a resource like a `VaultStaticSecret` is created in the `apps` namespace, the service account must be in +the apps namespace. The rationale behind this approach is to ensure that cross namespace access is not possible. + +## Vault authentication globals + +The `VaultAuthGlobal` resource is a global configuration that allows you to share a single authentication configuration +across a set of VaultAuth resources. This is useful when you have multiple VaultAuth resources that share the +same base configuration. For example, if you have multiple VaultAuth resources that all authenticate to Vault +using the same auth backend, you can create a single VaultAuthGlobal resource that defines the configuration +common to all VaultAuth instances. Options like `mount`, `method`, `namespace`, and method specific configuration +can all be inherited from the VaultAuthGlobal resource. 
Any field in the VaultAuth resource can be inherited +from a VaultAuthGlobal instance. Typically, most fields are inherited from the VaultAuthGlobal, +fields like `role`, and credential provider specific fields like `serviceAccount` are usually set on the referring +VaultAuth instance, since they are more specific to the application that requires the VaultAuth resource. + +*See [VaultAuthGlobal spec][vag-spec] and [VaultAuth spec][va-spec] for the complete list of available fields.* + + +## VaultAuthGlobal configuration inheritance + +- The configuration in the VaultAuth resource takes precedence over the configuration in the VaultAuthGlobal resource. +- The VaultAuthGlobal can reside in any namespace, but must allow the namespace of the VaultAuth resource to reference it. +- Default VaultAuthGlobal resources are denoted by the name `default` and are automatically referenced by all VaultAuth resources + when `spec.vaultAuthGlobalRef.allowDefault` is set to `true` and VSO is running with the `allow-default-globals` + option set in the `-global-vault-auth-options` flag (the default). +- When a `spec.vaultAuthGlobalRef.namespace` is set, the search for the default VaultAuthGlobal resource is + constrained to that namespace. Otherwise, the search order is: + 1. The default VaultAuthGlobal resource in the referring VaultAuth resource's namespace. + 2. The default VaultAuthGlobal resource in the Operator's namespace. + + +## Sample use cases and configurations + +The following sections provide some sample use cases and configurations for the VaultAuthGlobal resource. These +examples demonstrate how to use the VaultAuthGlobal resource to share a common authentication configuration across a +set of VaultAuth resources. Like other namespaced VSO custom resource definitions, there can be many VaultAuthGlobal +resources configured in a single Kubernetes cluster. + +### Multiple applications with shared authentication backend + +A Vault admin has configured a Kubernetes auth backend in Vault mounted at `kubernetes`. The admin expects to have +two applications authenticate using their own roles, and service accounts. The admin creates the necessary roles in +Vault bound to the service accounts and namespaces of the applications. 
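+
+The Vault-side role setup is outside the scope of this example, but a minimal sketch of what the
+admin might run is shown below. The mount path, policy names, and TTL are illustrative and will
+differ in your environment:
+
+```shell-session
+# One role per application, each bound to that application's service account
+# in the `apps` Kubernetes namespace.
+$ vault write auth/kubernetes/role/app1 \
+    bound_service_account_names=app1 \
+    bound_service_account_namespaces=apps \
+    audience=vault \
+    token_policies=app1 \
+    token_ttl=10m
+
+$ vault write auth/kubernetes/role/app2 \
+    bound_service_account_names=app2 \
+    bound_service_account_namespaces=apps \
+    audience=vault \
+    token_policies=app2 \
+    token_ttl=10m
+```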
+ +The admin creates a default VaultAuthGlobal with the following configuration: + +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuthGlobal +metadata: + name: default + namespace: admin +spec: + allowedNamespaces: + - apps + defaultAuthMethod: kubernetes + kubernetes: + audiences: + - vault + mount: kubernetes + role: default + serviceAccount: default + tokenExpirationSeconds: 600 +``` + +A developer creates a `VaultAuth` and VaultStaticSecret resource in their application's namespace with the following +configurations: + +Application 1 would have a configuration like this: +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + name: app1 + namespace: apps +spec: + kubernetes: + role: app1 + serviceAccount: app1 + vaultAuthGlobalRef: + allowDefault: true + namespace: admin +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: app1-secret + namespace: apps +spec: + destination: + create: true + name: app1-secret + hmacSecretData: true + mount: apps + path: app1 + type: kv-v2 + vaultAuthRef: app1 +``` + +Application 2 would have a similar configuration: +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + name: app2 + namespace: apps +spec: + kubernetes: + role: app2 + serviceAccount: app2 + vaultAuthGlobalRef: + allowDefault: true + namespace: admin +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: app2-secret + namespace: apps +spec: + destination: + create: true + name: app2-secret + hmacSecretData: true + mount: apps + path: app2 + type: kv-v2 + vaultAuthRef: app2 +``` + +#### Explanation + +- The default VaultAuthGlobal resource is created in the `admin` namespace. This resource defines the + common configuration for all VaultAuth resources that reference it. The `allowedNamespaces` field restricts the + VaultAuth resources that can reference this VaultAuthGlobal resource. In this case, only resources in the `apps` + namespace can reference this VaultAuthGlobal resource. +- The VaultAuth resources in the `apps` namespace reference the VaultAuthGlobal resource. This allows the VaultAuth + resources to inherit the configuration from the VaultAuthGlobal resource. The `role` and `serviceAccount` fields are + specific to the application and are not inherited from the VaultAuthGlobal resource. Since the + `.spec.vaultAuthGlobalRef.allowDefault` field is set to `true`, the VaultAuth resources will automatically reference the + `default` VaultAuthGlobal in defined namespace. +- The VaultStaticSecret resources in the `apps` namespace reference the VaultAuth resources. This allows the + VaultStaticSecret resources to authenticate to Vault in order to sync the KV secrets to the destination Kubernetes + Secret. + +### Multiple applications with shared authentication backend and role + +A Vault admin has configured a Kubernetes auth backend in Vault mounted at `kubernetes`. The admin expects to have +two applications authenticate using a single role, and service account. The admin creates the necessary role in +Vault bound to the same service account and namespace of the applications. 
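+
+As in the previous example, the Vault-side setup is only sketched here. A single role bound to the
+shared `apps` service account might look something like this (mount path, policy, and TTL are
+illustrative):
+
+```shell-session
+$ vault write auth/kubernetes/role/apps \
+    bound_service_account_names=apps \
+    bound_service_account_namespaces=apps \
+    audience=vault \
+    token_policies=apps \
+    token_ttl=10m
+```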
+ +The admin or developer creates a default VaultAuthGlobal in the application's namespace with the following +configuration: + +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuthGlobal +metadata: + name: default + namespace: apps +spec: + defaultAuthMethod: kubernetes + kubernetes: + audiences: + - vault + mount: kubernetes + role: apps + serviceAccount: apps + tokenExpirationSeconds: 600 +``` + +A developer creates single VaultAuth and the necessary VaultStatic secrets in their application's namespace with the +following: + +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + name: apps + namespace: apps +spec: + vaultAuthGlobalRef: + allowDefault: true +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: app1-secret + namespace: apps +spec: + destination: + create: true + name: app1-secret + hmacSecretData: true + mount: apps + path: app1 + type: kv-v2 + vaultAuthRef: apps +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: app2-secret + namespace: apps +spec: + destination: + create: true + name: app2-secret + hmacSecretData: true + mount: apps + path: app2 + type: kv-v2 + vaultAuthRef: apps +``` + +#### Explanation + +- The default VaultAuthGlobal resource is created in the `apps` namespace. It provides all the necessary configuration + for the VaultAuth resources that reference it. +- A single VaultAuth resource is created in the `apps` namespace. This resource references the VaultAuthGlobal resource + and inherits the configuration from it. +- The VaultStaticSecret resources in the `apps` namespace reference the VaultAuth resource. This allows the VaultStaticSecret + resources to authenticate to Vault in order to sync the KV secrets to the destination Kubernetes Secret. + +### Multiple applications with multiple authentication backends and roles + +A Vault admin has configured a Kubernetes auth backend in Vault mounted at `kubernetes`. In addition, the Vault +admin has configured a JWT auth backend mounted at `jwt`. The admin creates the necessary roles in Vault for each +auth method. The admin expects to have two applications authenticate, one using `kubernetes` auth and the other using `jwt` auth. 
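+
+A hedged sketch of the two Vault roles this example assumes, one per auth backend, is shown below.
+The mount paths, policies, and claim bindings are illustrative and depend on how the backends were
+configured:
+
+```shell-session
+# Kubernetes auth role used by application 1.
+$ vault write auth/kubernetes/role/apps \
+    bound_service_account_names=apps-k8s \
+    bound_service_account_namespaces=apps \
+    audience=vault \
+    token_policies=apps \
+    token_ttl=10m
+
+# JWT auth role used by application 2, validating the projected service account token.
+$ vault write auth/jwt/role/apps \
+    role_type=jwt \
+    bound_audiences=vault \
+    user_claim=sub \
+    bound_subject="system:serviceaccount:apps:apps-jwt" \
+    token_policies=apps \
+    token_ttl=10m
+```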
+ +The admin or developer creates a default VaultAuthGlobal in the application's namespace with the following +configuration: + +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuthGlobal +metadata: + name: default + namespace: apps +spec: + defaultAuthMethod: kubernetes + kubernetes: + audiences: + - vault + mount: kubernetes + role: apps + serviceAccount: apps-k8s + tokenExpirationSeconds: 600 + jwt: + audiences: + - vault + mount: jwt + role: apps + serviceAccount: apps-jwt +``` + +A developer creates a VaultAuth and VaultStaticSecret resource in their application's namespace with the following +configurations: + +Application 1 would have a configuration like this which will be using the kubernetes auth method: +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + name: apps-default + namespace: apps +spec: + # uses the default kubernetes auth method as defined in + # the VaultAuthGlobal .spec.defaultAuthMethod + vaultAuthGlobalRef: + allowDefault: true +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: app1-secret + namespace: apps +spec: + destination: + create: true + name: app1-secret + hmacSecretData: true + mount: apps + path: app1 + type: kv-v2 + vaultAuthRef: apps-default +``` + +Application 2 would have a similar configuration, except it will be using the JWT auth method: +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + name: apps-jwt + namespace: apps +spec: + method: jwt + vaultAuthGlobalRef: + allowDefault: true +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + name: app2-secret + namespace: apps +spec: + destination: + create: true + name: app2-secret + hmacSecretData: true + mount: apps + path: app2 + type: kv-v2 + vaultAuthRef: apps-jwt +``` + +#### Explanation + +- The default VaultAuthGlobal resource is created in the `apps` namespace. It provides all the necessary configuration + for the VaultAuth resources that reference it. The `defaultAuthMethod` field defines the default auth method to use + when authenticating to Vault. The `kubernetes` and `jwt` fields define the configuration for the respective auth + method. +- Application 1 uses the default kubernetes auth method defined in the VaultAuthGlobal resource. The VaultAuth resource + references the VaultAuthGlobal resource and inherits the kubernetes auth configuration from it. +- Application 2 uses the JWT auth method defined in the VaultAuthGlobal resource. The VaultAuth resource references the + VaultAuthGlobal resource and inherits the JWT auth configuration from it. +- Neither VaultAuth resource has a `role` or `serviceAccount` field set. This is because the `role` and `serviceAccount` + fields are defined in the VaultAuthGlobal resource and are inherited by the VaultAuth resources. 
+ +## VaultAuthGlobal common errors and troubleshooting + +There are few sources for tracking down issues with VaultAuthGlobal resources: +- Vault Secrets Operator logs +- Kubernetes events +- Resource status + +Below are examples of errors from each source and how to resolve them: + + Sample output sync failures from the Vault Secrets Operator logs: + ```json + { + "level": "error", + "ts": "2024-07-16T17:35:20Z", + "logger": "cachingClientFactory", + "msg": "Failed to get cacheKey from obj", + "controller": "vaultstaticsecret", + "controllerGroup": "secrets.hashicorp.com", + "controllerKind": "VaultStaticSecret", + "VaultStaticSecret": { + "name": "app1", + "namespace": "apps" + }, + "namespace": "apps", + "name": "app1", + "reconcileID": "5201f597-6c5d-4d07-ae8f-30a39c80dc54", + "error": "failed getting admin/default, err=VaultAuthGlobal.secrets.hashicorp.com \"default\" not found" + } + ``` + + Check for related Kubernetes events: + + ```shell + $ kubectl events --types=Warning -n admin --for vaultauths.secrets.hashicorp.com/default -o json + ``` + + Sample output from the Kubernetes event for the VaultAuth resource: + + ```json + { + "kind": "Event", + "apiVersion": "v1", + "metadata": { + "name": "default.17e2c0da7b0e36b5", + "namespace": "admin", + "uid": "3ca6088e-7391-4b76-9443-a790ccae02c0", + "resourceVersion": "634396", + "creationTimestamp": "2024-07-16T17:14:12Z" + }, + "involvedObject": { + "kind": "VaultAuth", + "namespace": "admin", + "name": "default", + "uid": "1dabe3a5-5479-4f5d-ac48-5db7eff7f822", + "apiVersion": "secrets.hashicorp.com/v1beta1", + "resourceVersion": "631994" + }, + "reason": "Accepted", + "message": "Failed to handle VaultAuth resource request: err=failed getting admin/default, err=VaultAuthGlobal.secrets.hashicorp.com \"default\" not found", + "source": { + "component": "VaultAuth" + }, + "firstTimestamp": "2024-07-16T17:14:12Z", + "lastTimestamp": "2024-07-16T17:15:53Z", + "count": 25, + "type": "Warning", + "eventTime": null, + "reportingComponent": "VaultAuth", + "reportingInstance": "" + } + ``` + +Check the conditions on the VaultAuth resource: + + ```shell + $ kubectl get vaultauths.secrets.hashicorp.com -n admin default -o jsonpath='{.status}' + ``` + +Sample output of the VaultAuth's status (prettified). The `valid` field will be `false` for the condition reason +`VaultAuthGlobalRef`: + ```json + { + "conditions": [ + { + "lastTransitionTime": "2024-07-16T15:35:43Z", + "message": "failed getting admin/default, err=VaultAuthGlobal.secrets.hashicorp.com \"default\" not found", + "observedGeneration": 3, + "reason": "VaultAuthGlobalRef", + "status": "False", + "type": "Available" + } + ], + "specHash": "e264f241cb4ad776802924b6ad2aa272b11cffd570382605d1c2ddbdfd661ad3", + "valid": false + } + ``` +- **Situation**: The VaultAuthGlobal resource is not found or is invalid for some reason, denoted by error messages like +`not found...`. + + **Resolution**: Ensure that the VaultAuthGlobal resource exists in the referring VaultAuth's namespace or a default + VaultAuthGlobal resource exists per [VaultAuthGlobal configuration inheritance] + (#vaultauthglobal-configuration-inheritance) + +- **Situation**: The VaultAuthGlobal is not allowed to be referenced by the VaultAuth resource, denoted by error + messages like `target namespace "apps" is not allowed...`. + + **Resolution**: Ensure that the VaultAuthGlobal resource's `spec.allowedNamespaces` field includes the namespace of the + VaultAuth resource. 
+ +- **Situation**: The VaultAuth resource is not valid due to missing required fields, denoted by error messages like + `invalid merge: empty role`. + + **Resolution**: Ensure all required fields are set either on the VaultAuth resource or on the inherited + VaultAuthGlobal. + + A successfully merged VaultAuth resource will have the `valid` field set to `true` and the `conditions` will look + something like: + + ```json + { + "conditions": [ + { + "lastTransitionTime": "2024-07-17T13:46:43Z", + "message": "VaultAuthGlobal successfully merged, key=admin/default, uid=6aeb3559-8f42-48bf-b16a-2305bc9a9bed, generation=7", + "observedGeneration": 1, + "reason": "VaultAuthGlobalRef", + "status": "True", + "type": "Available" + } + ], + "specHash": "5cbe5544d0557926e00002514871b95c49903a9d4496ef9b794c84f1e54db1a0", + "valid": true + } + ``` + + + + The value for the key in the message field is the namespace/name of the VaultAuthGlobal object that was successfully merged. + This is useful if you want to know which VaultAuthGlobal object was used to merge the VaultAuth object. + + + + +## Some authentication engines in detail + +- [AWS](/vault/docs/auth/aws) + +- [GCP](/vault/docs/auth/gcp) diff --git a/website/content/docs/platform/k8s/vso/sources/vault/index.mdx b/website/content/docs/platform/k8s/vso/sources/vault/index.mdx index 9957123d2653..12d0ddf5e719 100644 --- a/website/content/docs/platform/k8s/vso/sources/vault/index.mdx +++ b/website/content/docs/platform/k8s/vso/sources/vault/index.mdx @@ -4,6 +4,7 @@ page_title: Vault Secrets Operator description: >- The Vault Secrets Operator allows Pods to consume Vault secrets natively from Kubernetes Secrets. --- +@include 'vso/common-links.mdx' # Vault Secrets Operator @@ -29,6 +30,8 @@ Vault Secrets Operator supports the following Vault features: during drift remediation. - Cross Vault namespace authentication for Vault Enterprise 1.13+. - [Encrypted Vault client cache storage](/vault/docs/platform/k8s/vso/sources/vault#vault-client-cache) for improved performance and security. +- [Instant updates](/vault/docs/platform/k8s/vso/sources/vault#instant-updates) + for VaultStaticSecret's with Vault Enterprise 1.16.3+. ### Supported Vault authentication methods @@ -112,6 +115,76 @@ spec: # headers: [] ``` +### VaultAuthGlobal custom resource + + + +VSO v0.8.0 + + + +Namespaced resource that provides shared Vault authentication configuration that can be inherited by multiple +`VaultAuth` custom resources. It supports multiple authentication methods and allows you to define a default +authentication method that can be overridden by individual VaultAuth custom resources. See `vaultAuthGlobalRef` in +the [VaultAuth spec][va-spec] for more details. The `VaultAuthGlobal` custom resource is optional and can be used to +simplify the configuration of multiple VaultAuth custom resources by reducing config duplication. Like other +namespaced VSO custom resources, there can be many VaultAuthGlobal resources configured in a single Kubernetes cluster. + +For more details on how to integrate VaultAuthGlobals into your workflow, see the detailed [Authentication][auth] +docs. + + + + The VaultAuthGlobal resources shares many of the same fields as the VaultAuth custom resource, but cannot be used + for authentication directly. It is only used to define shared Vault authentication configuration within a Kubernetes + cluster. 
+ + + +The example below demonstrates how to define a VaultAuthGlobal custom resource with a default authentication method of +`kubernetes`, along with a VaultAuth custom resource that inherits its global configuration. + +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuthGlobal +metadata: + namespace: vso-example + name: vault-auth-global +spec: + defaultAuthMethod: kubernetes + kubernetes: + audiences: + - vault + mount: kubernetes + namespace: example-ns + role: auth-role + serviceAccount: default + tokenExpirationSeconds: 600 +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultAuth +metadata: + namespace: vso-example + name: vault-auth +spec: + vaultAuthGlobalRef: + name: vault-auth-global + kubernetes: + role: local-role +``` + +#### Explanation + +- The VaultAuthGlobal custom resource defines a default authentication method of kubernetes with the `defaultAuthMethod` + field. +- The VaultAuth custom resource inherits the global configuration by referencing the VaultAuthGlobal custom + resource with the `vaultAuthGlobalRef` field. +- The `kubernetes.role` field in the VaultAuth custom resource spec overrides the value of the corresponding field in + the VaultAuthGlobal custom resource. All other fields are inherited from the VaultAuthGlobal custom resource + `spec.kubernetes` field, e.g., `audiences`, `mount`, `serviceAccount`, `namespace`, etc. + + ## Vault secret custom resource definitions Provide the configuration necessary for the Operator to replicate a single Vault Secret to a single Kubernetes Secret. @@ -305,6 +378,30 @@ The Vault Secrets Operator can optionally cache Vault client information such as The [Encrypted client cache](/vault/docs/platform/k8s/vso/sources/vault/client-cache) guide will walk you through the steps to enable and configure client cache encryption. +## Instant updates + +The Vault Secrets Operator can instantly update Kubernetes Secrets when changes +are made in Vault, by subscribing to [Vault Events][vault-events] for change +notification. Setting a refresh interval (e.g. [refreshAfter][vss-spec]) is +still recommended since event message delivery is not guaranteed. + +**Supported secret types:** +- [VaultStaticSecret](#vaultstaticsecret-custom-resource) ([kv-v1](/vault/docs/secrets/kv/kv-v2), + [kv-v2](/vault/docs/secrets/kv/kv-v1)) + + + +The instant updates option requires [Vault Enterprise](/vault/docs/enterprise) +1.16.3+ due to the use of [Vault Event Notifications][vault-events]. + + + +The [Instant updates](/vault/docs/platform/k8s/vso/sources/vault/instant-updates) guide +will walk you through the steps to enable instant updates for a VaultStaticSecret. + +[vss-spec]: /vault/docs/platform/k8s/vso/api-reference#vaultstaticsecretspec +[vault-events]: /vault/docs/concepts/events + ## Tutorial Refer to the [Vault Secrets Operator on diff --git a/website/content/docs/platform/k8s/vso/sources/vault/instant-updates.mdx b/website/content/docs/platform/k8s/vso/sources/vault/instant-updates.mdx new file mode 100644 index 000000000000..a6f739988e6c --- /dev/null +++ b/website/content/docs/platform/k8s/vso/sources/vault/instant-updates.mdx @@ -0,0 +1,102 @@ +--- +layout: docs +page_title: Instant updates with Vault Secrets Operator +description: >- + Enable instant updates with Vault Secrets Operator. +--- + +# Instant updates for a VaultStaticSecret + +Vault Secrets Operator (VSO) supports instant updates for +[VaultStaticSecrets][vss-spec] by subscribing to event notifications from Vault. 
+ +## Before you start + +- **You must have [Vault Secrets Operator](/vault/docs/platform/k8s/vso/sources/vault) installed**. +- **You must use [Vault Enterprise](/vault/docs/enterprise) version 1.16.3 or later**. + +## Step 1: Set event permissions + +Grant these permissions in the policy associated with the VaultAuth role: + + ```hcl + path "/" { + capabilities = ["read", "list", "subscribe"] + subscribe_event_types = ["*"] + } + + path "sys/events/subscribe/kv*" { + capabilities = ["read"] + } + ``` + + + +See [Event Notifications Policies][events-policies] for more information on +Vault event notification permissions. + + + +## Step 2: Enable instant updates on the VaultStaticSecret + +Set `syncConfig.instantUpdates=true` in the [VaultStaticSecret spec][vss-spec]: + +```yaml +--- +apiVersion: secrets.hashicorp.com/v1beta1 +kind: VaultStaticSecret +metadata: + namespace: vso-example + name: vault-static-secret-v2 +spec: + vaultAuthRef: vault-auth + mount: + type: kv-v2 + path: + version: 2 + refreshAfter: 1h + destination: + create: true + name: static-secret2 + syncConfig: + instantUpdates: true +``` + +## Debugging + +Check Kubernetes events on the VaultStaticSecret resource to see if VSO +subscribed to Vault event notifications. + +### Example: VSO is subscribed to Vault event notifications for the secret + +```shell-session +$ kubectl describe vaultstaticsecret vault-static-secret-v2 -n vso-example +... +Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SecretSynced 2s VaultStaticSecret Secret synced + Normal EventWatcherStarted 2s (x2 over 2s) VaultStaticSecret Started watching events + Normal SecretRotated 2s VaultStaticSecret Secret synced +``` + +### Example: The VaultAuth role policy lacks the required event permissions + +```shell-session +$ kubectl describe vaultstaticsecret vault-static-secret-v2 -n vso-example +... 
+Events: + Type Reason Age From Message + ---- ------ ---- ---- ------- + Normal SecretSynced 2s VaultStaticSecret Secret synced + Warning EventWatcherError 2s VaultStaticSecret Error while watching events: + failed to connect to vault websocket: error returned when opening event stream + web socket to wss://vault.vault.svc.cluster.local:8200/v1/sys/events/subscribe/kv%2A?json=true, + ensure VaultAuth role has correct permissions and Vault is Enterprise version + 1.16 or above: {"errors":["1 error occurred:\n\t* permission denied\n\n"]} + Normal SecretRotated 2s VaultStaticSecret Secret synced +``` + +[vss-spec]: /vault/docs/platform/k8s/vso/api-reference#vaultstaticsecretspec +[vault-events]: /vault/docs/concepts/events +[events-policies]: /vault/docs/concepts/events#policies diff --git a/website/content/docs/release-notes/1.16.1.mdx b/website/content/docs/release-notes/1.16.1.mdx index f9f79bf884b7..a5a324516240 100644 --- a/website/content/docs/release-notes/1.16.1.mdx +++ b/website/content/docs/release-notes/1.16.1.mdx @@ -24,6 +24,7 @@ description: |- | 1.16.1 - 1.16.3 | [New nodes added by autopilot upgrades provisioned with the wrong version](/vault/docs/upgrading/upgrade-to-1.15.x#new-nodes-added-by-autopilot-upgrades-provisioned-with-the-wrong-version) | | 1.15.8+ | [Autopilot upgrade for Vault Enterprise fails](/vault/docs/upgrading/upgrade-to-1.15.x#autopilot) | | 1.16.5 | [Listener stops listening on untrusted upstream connection with particular config settings](/vault/docs/upgrading/upgrade-to-1.16.x#listener-proxy-protocol-config) | +| 1.16.3 - 1.16.6 | [Vault standby nodes not deleting removed entity-aliases from in-memory database](/vault/docs/upgrade-to-1.16.x#dangling-entity-alias-in-memory) | ## Vault companion updates diff --git a/website/content/docs/release-notes/1.17.0.mdx b/website/content/docs/release-notes/1.17.0.mdx index e0557991f715..7bfeaf4b707d 100644 --- a/website/content/docs/release-notes/1.17.0.mdx +++ b/website/content/docs/release-notes/1.17.0.mdx @@ -22,6 +22,7 @@ description: |- | Known issue (1.17.0) | [Vault Agent and Vault Proxy consume excessive amounts of CPU](/vault/docs/upgrading/upgrade-to-1.17.x#agent-proxy-cpu-1-17) | | Known issue (1.15.8+) | [Autopilot upgrade for Vault Enterprise fails](/vault/docs/upgrading/upgrade-to-1.15.x#autopilot) | | Known issue (1.17.1) | [Listener stops listening on untrusted upstream connection with particular config settings](/vault/docs/upgrading/upgrade-to-1.17.x#listener-proxy-protocol-config) | +| Known issue (1.17.0 - 1.17.2) | [Vault standby nodes not deleting removed entity-aliases from in-memory database](/vault/docs/upgrade-to-1.17.x#dangling-entity-alias-in-memory) ## Vault companion updates diff --git a/website/content/docs/upgrading/upgrade-to-1.16.x.mdx b/website/content/docs/upgrading/upgrade-to-1.16.x.mdx index 0e3d6ea7f436..74c511f0b3a7 100644 --- a/website/content/docs/upgrading/upgrade-to-1.16.x.mdx +++ b/website/content/docs/upgrading/upgrade-to-1.16.x.mdx @@ -115,3 +115,5 @@ decides to trigger the flag. 
More information can be found in the @include 'known-issues/1_16_secrets-sync-chroot-activation.mdx' @include 'known-issues/config_listener_proxy_protocol_behavior_issue.mdx' + +@include 'known-issues/dangling-entity-aliases-in-memory.mdx' diff --git a/website/content/docs/upgrading/upgrade-to-1.17.x.mdx b/website/content/docs/upgrading/upgrade-to-1.17.x.mdx index f0651364ddf5..813d922f39bf 100644 --- a/website/content/docs/upgrading/upgrade-to-1.17.x.mdx +++ b/website/content/docs/upgrading/upgrade-to-1.17.x.mdx @@ -90,3 +90,5 @@ incorrectly. For additional details, refer to the @include 'known-issues/config_listener_proxy_protocol_behavior_issue.mdx' @include 'known-issues/transit-input-on-cmac-response.mdx' + +@include 'known-issues/dangling-entity-aliases-in-memory.mdx' diff --git a/website/content/partials/helm/install.mdx b/website/content/partials/helm/install.mdx index f555c290dacf..24b738a05221 100644 --- a/website/content/partials/helm/install.mdx +++ b/website/content/partials/helm/install.mdx @@ -2,6 +2,7 @@ # List the available releases $ helm search repo hashicorp/vault -l NAME CHART VERSION APP VERSION DESCRIPTION +hashicorp/vault 0.28.1 1.17.2 Official HashiCorp Vault Chart hashicorp/vault 0.28.0 1.16.1 Official HashiCorp Vault Chart hashicorp/vault 0.27.0 1.15.2 Official HashiCorp Vault Chart hashicorp/vault 0.26.1 1.15.1 Official HashiCorp Vault Chart @@ -9,9 +10,8 @@ hashicorp/vault 0.26.0 1.15.1 Official HashiCorp Vault Chart hashicorp/vault 0.25.0 1.14.0 Official HashiCorp Vault Chart hashicorp/vault 0.24.0 1.13.1 Official HashiCorp Vault Chart hashicorp/vault 0.23.0 1.12.1 Official HashiCorp Vault Chart -hashicorp/vault 0.22.1 1.12.0 Official HashiCorp Vault Chart ... -# Install version 0.28.0 -$ helm install vault hashicorp/vault --version 0.28.0 +# Install version 0.28.1 +$ helm install vault hashicorp/vault --version 0.28.1 ``` diff --git a/website/content/partials/helm/repo.mdx b/website/content/partials/helm/repo.mdx index 7d76bf67d8d8..31fdd5da6f6d 100644 --- a/website/content/partials/helm/repo.mdx +++ b/website/content/partials/helm/repo.mdx @@ -4,5 +4,5 @@ $ helm repo add hashicorp https://helm.releases.hashicorp.com $ helm search repo hashicorp/vault NAME CHART VERSION APP VERSION DESCRIPTION -hashicorp/vault 0.28.0 1.16.1 Official HashiCorp Vault Chart +hashicorp/vault 0.28.1 1.17.2 Official HashiCorp Vault Chart ``` diff --git a/website/content/partials/known-issues/dangling-entity-aliases-in-memory.mdx b/website/content/partials/known-issues/dangling-entity-aliases-in-memory.mdx new file mode 100644 index 000000000000..c825725552e3 --- /dev/null +++ b/website/content/partials/known-issues/dangling-entity-aliases-in-memory.mdx @@ -0,0 +1,24 @@ + + +### Deleting an entity-aliases does not remove it from the in-memory database on standby nodes + +#### Affected versions + +##### Vault Community Edition + +- 1.16.3 +- 1.17.0 - 1.17.2 + +##### Enterprise + +- 1.16.3+ent - 1.16.6+ent +- 1.17.0+ent - 1.17.2+ent + +#### Issue + +Entity-aliases deleted from Vault are not removed from the in-memory database on +standby nodes within a cluster. As a result, API requests to create a new +entity-alias with the same mount accessor and name sent to a standby node will +fail. + +This bug is fixed in Vault 1.16.7+ent, 1.17.3, 1.17.3+ent and later. 
diff --git a/website/content/partials/vso/common-links.mdx b/website/content/partials/vso/common-links.mdx new file mode 100644 index 000000000000..bc034f392ae5 --- /dev/null +++ b/website/content/partials/vso/common-links.mdx @@ -0,0 +1,4 @@ +[va-spec]: /vault/docs/platform/k8s/vso/api-reference#vaultauthspec +[vag-spec]: /vault/docs/platform/k8s/vso/api-reference#vaultauthglobalspec +[helm]: /vault/docs/platform/k8s/vso/helm +[auth]: /vault/docs/platform/k8s/vso/sources/vault/auth diff --git a/website/data/docs-nav-data.json b/website/data/docs-nav-data.json index 5974bcfc6694..b76afd4215e1 100644 --- a/website/data/docs-nav-data.json +++ b/website/data/docs-nav-data.json @@ -376,7 +376,7 @@ }, { "title": "TCP", - "routes": [ + "routes": [ { "title": "Overview", "path": "configuration/listener/tcp" @@ -1116,7 +1116,7 @@ "path": "commands/transform/import" } ] - }, + }, { "title": "unwrap", "path": "commands/unwrap" @@ -2208,8 +2208,12 @@ "path": "platform/k8s/vso/sources/vault" }, { - "title": "Auth Methods", + "title": "Authentication", "routes": [ + { + "title": "Overview", + "path": "platform/k8s/vso/sources/vault/auth" + }, { "title": "AWS", "path": "platform/k8s/vso/sources/vault/auth/aws" @@ -2223,6 +2227,10 @@ { "title": "Encrypted client cache", "path": "platform/k8s/vso/sources/vault/client-cache" + }, + { + "title": "Instant updates", + "path": "platform/k8s/vso/sources/vault/instant-updates" } ] },