diff --git a/.github/actions/set-up-buf/action.yml b/.github/actions/set-up-buf/action.yml
index e48f254509e3..b151a307a0e4 100644
--- a/.github/actions/set-up-buf/action.yml
+++ b/.github/actions/set-up-buf/action.yml
@@ -60,7 +60,7 @@ runs:
fi
mkdir -p tmp
- ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "buf-${OS}-${ARCH}.tar.gz" -O tmp/buf.tgz -R bufbuild/buf
+ ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "buf-${OS}-${ARCH}.tar.gz" -O tmp/buf.tgz -R bufbuild/buf
pushd tmp && tar -xvf buf.tgz && popd
mv tmp/buf/bin/buf "$DESTINATION"
rm -rf tmp
diff --git a/.github/actions/set-up-gofumpt/action.yml b/.github/actions/set-up-gofumpt/action.yml
index 2d046c7cb49e..884f915c29ea 100644
--- a/.github/actions/set-up-gofumpt/action.yml
+++ b/.github/actions/set-up-gofumpt/action.yml
@@ -56,6 +56,6 @@ runs:
export OS="darwin"
fi
- ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "gofumpt_*_${OS}_${ARCH}" -O gofumpt -R mvdan/gofumpt
+ ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "gofumpt_*_${OS}_${ARCH}" -O gofumpt -R mvdan/gofumpt
chmod +x gofumpt
mv gofumpt "$DESTINATION"
diff --git a/.github/actions/set-up-gosimports/action.yml b/.github/actions/set-up-gosimports/action.yml
index 3aacd2c31b4b..06623ecba052 100644
--- a/.github/actions/set-up-gosimports/action.yml
+++ b/.github/actions/set-up-gosimports/action.yml
@@ -57,7 +57,7 @@ runs:
fi
mkdir -p tmp
- ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "gosimports_*_${OS}_${ARCH}.tar.gz" -O tmp/gosimports.tgz -R rinchsan/gosimports
+ ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "gosimports_*_${OS}_${ARCH}.tar.gz" -O tmp/gosimports.tgz -R rinchsan/gosimports
pushd tmp && tar -xvf gosimports.tgz && popd
mv tmp/gosimports "$DESTINATION"
rm -rf tmp
diff --git a/.github/actions/set-up-gotestsum/action.yml b/.github/actions/set-up-gotestsum/action.yml
index e45ed9e43021..6ea84c450023 100644
--- a/.github/actions/set-up-gotestsum/action.yml
+++ b/.github/actions/set-up-gotestsum/action.yml
@@ -54,7 +54,7 @@ runs:
fi
mkdir -p tmp
- ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "*${OS}_${ARCH}.tar.gz" -O tmp/gotestsum.tgz -R gotestyourself/gotestsum
+ ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "*${OS}_${ARCH}.tar.gz" -O tmp/gotestsum.tgz -R gotestyourself/gotestsum
pushd tmp && tar -xvf gotestsum.tgz && popd
mv tmp/gotestsum "$DESTINATION"
rm -rf tmp
diff --git a/.github/actions/set-up-misspell/action.yml b/.github/actions/set-up-misspell/action.yml
index 4ce499eeeeb7..4447da06adda 100644
--- a/.github/actions/set-up-misspell/action.yml
+++ b/.github/actions/set-up-misspell/action.yml
@@ -57,7 +57,7 @@ runs:
fi
mkdir -p tmp
- ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "misspell_*_${OS}_${ARCH}.tar.gz" -O tmp/misspell.tgz -R golangci/misspell
+ ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "misspell_*_${OS}_${ARCH}.tar.gz" -O tmp/misspell.tgz -R golangci/misspell
pushd tmp && tar -xvf misspell.tgz && popd
mv tmp/misspell_"$(echo "$VERSION" | tr -d v)"_${OS}_${ARCH}/misspell "$DESTINATION"
rm -rf tmp
diff --git a/.github/actions/set-up-staticcheck/action.yml b/.github/actions/set-up-staticcheck/action.yml
index efd253ad054d..528474c4bdfa 100644
--- a/.github/actions/set-up-staticcheck/action.yml
+++ b/.github/actions/set-up-staticcheck/action.yml
@@ -57,7 +57,7 @@ runs:
fi
mkdir -p tmp
- ./.github/scripts/retry-command.sh gh release download "$VERSION" -p "staticcheck_${OS}_${ARCH}.tar.gz" -O tmp/staticcheck.tgz -R dominikh/go-tools
+ ./.github/scripts/retry-command.sh gh release download "$VERSION" --clobber -p "staticcheck_${OS}_${ARCH}.tar.gz" -O tmp/staticcheck.tgz -R dominikh/go-tools
pushd tmp && tar -xvf staticcheck.tgz && popd
mv tmp/staticcheck/staticcheck "$DESTINATION"
rm -rf tmp
diff --git a/.github/scripts/retry-command.sh b/.github/scripts/retry-command.sh
index 85ace489d140..76f0c902bae0 100755
--- a/.github/scripts/retry-command.sh
+++ b/.github/scripts/retry-command.sh
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# Copyright (c) HashiCorp, Inc.
# SPDX-License-Identifier: BUSL-1.1
-set -euo pipefail
+set -uo pipefail
tries=5
count=0
@@ -14,5 +14,5 @@ do
fi
((count++))
echo "trying again, attempt $count"
- sleep 2
+ sleep $count
done
diff --git a/api/sudo_paths.go b/api/sudo_paths.go
index 24beb4bb1f2a..d458cbde0f45 100644
--- a/api/sudo_paths.go
+++ b/api/sudo_paths.go
@@ -28,6 +28,7 @@ var sudoPaths = map[string]*regexp.Regexp{
"/sys/config/ui/headers": regexp.MustCompile(`^/sys/config/ui/headers/?$`),
"/sys/config/ui/headers/{header}": regexp.MustCompile(`^/sys/config/ui/headers/.+$`),
"/sys/internal/inspect/router/{tag}": regexp.MustCompile(`^/sys/internal/inspect/router/.+$`),
+ "/sys/internal/counters/activity/export": regexp.MustCompile(`^/sys/internal/counters/activity/export$`),
"/sys/leases": regexp.MustCompile(`^/sys/leases$`),
// This entry is a bit wrong... sys/leases/lookup does NOT require sudo. But sys/leases/lookup/ with a trailing
// slash DOES require sudo. But the part of the Vault CLI that uses this logic doesn't pass operation-appropriate
diff --git a/audit/backend_file.go b/audit/backend_file.go
index a1e07ef0dd72..1068cfb35765 100644
--- a/audit/backend_file.go
+++ b/audit/backend_file.go
@@ -76,12 +76,12 @@ func newFileBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*FileBa
return nil, err
}
- var opt []event.Option
+ sinkOpts := []event.Option{event.WithLogger(conf.Logger)}
if mode, ok := conf.Config[optionMode]; ok {
- opt = append(opt, event.WithFileMode(mode))
+ sinkOpts = append(sinkOpts, event.WithFileMode(mode))
}
- err = b.configureSinkNode(conf.MountPath, filePath, cfg.requiredFormat, opt...)
+ err = b.configureSinkNode(conf.MountPath, filePath, cfg.requiredFormat, sinkOpts...)
if err != nil {
return nil, err
}
diff --git a/audit/backend_socket.go b/audit/backend_socket.go
index 5e98b64f5426..20e58bc0e075 100644
--- a/audit/backend_socket.go
+++ b/audit/backend_socket.go
@@ -70,6 +70,7 @@ func newSocketBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*Sock
sinkOpts := []event.Option{
event.WithSocketType(socketType),
event.WithMaxDuration(writeDeadline),
+ event.WithLogger(conf.Logger),
}
err = event.ValidateOptions(sinkOpts...)
diff --git a/audit/backend_syslog.go b/audit/backend_syslog.go
index a55437260782..1da9fc107ea4 100644
--- a/audit/backend_syslog.go
+++ b/audit/backend_syslog.go
@@ -60,6 +60,7 @@ func newSyslogBackend(conf *BackendConfig, headersConfig HeaderFormatter) (*Sysl
sinkOpts := []event.Option{
event.WithFacility(facility),
event.WithTag(tag),
+ event.WithLogger(conf.Logger),
}
err = event.ValidateOptions(sinkOpts...)
diff --git a/audit/broker.go b/audit/broker.go
index 96cd1405f3d1..47680e7be311 100644
--- a/audit/broker.go
+++ b/audit/broker.go
@@ -15,7 +15,6 @@ import (
"github.com/armon/go-metrics"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/go-hclog"
- "github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/helper/namespace"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/logical"
@@ -253,7 +252,7 @@ func (b *Broker) Deregister(ctx context.Context, name string) error {
// LogRequest is used to ensure all the audit backends have an opportunity to
// log the given request and that *at least one* succeeds.
-func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret error) {
+func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (retErr error) {
b.RLock()
defer b.RUnlock()
@@ -265,18 +264,15 @@ func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret erro
defer metrics.MeasureSince([]string{"audit", "log_request"}, time.Now())
defer func() {
metricVal := float32(0.0)
- if ret != nil {
+ if retErr != nil {
metricVal = 1.0
}
metrics.IncrCounter([]string{"audit", "log_request_failure"}, metricVal)
}()
- var retErr *multierror.Error
-
e, err := NewEvent(RequestType)
if err != nil {
- retErr = multierror.Append(retErr, err)
- return retErr.ErrorOrNil()
+ return err
}
e.Data = in
@@ -295,8 +291,7 @@ func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret erro
// cancelled context and refuse to process the nodes further.
ns, err := namespace.FromContext(ctx)
if err != nil {
- retErr = multierror.Append(retErr, fmt.Errorf("namespace missing from context: %w", err))
- return retErr.ErrorOrNil()
+ return fmt.Errorf("namespace missing from context: %w", err)
}
tempContext, auditCancel := context.WithTimeout(context.Background(), timeout)
@@ -308,34 +303,38 @@ func (b *Broker) LogRequest(ctx context.Context, in *logical.LogInput) (ret erro
if hasAuditPipelines(b.broker) {
status, err = b.broker.Send(auditContext, event.AuditType.AsEventType(), e)
if err != nil {
- retErr = multierror.Append(retErr, multierror.Append(err, status.Warnings...))
- return retErr.ErrorOrNil()
+ return fmt.Errorf("%w: %w", err, errors.Join(status.Warnings...))
}
}
// Audit event ended up in at least 1 sink.
if len(status.CompleteSinks()) > 0 {
- return retErr.ErrorOrNil()
+ // We should log warnings to the operational logs regardless of whether
+ // we consider the overall auditing attempt to be successful.
+ if len(status.Warnings) > 0 {
+ b.logger.Error("log request underlying pipeline error(s)", "error", errors.Join(status.Warnings...))
+ }
+
+ return nil
}
// There were errors from inside the pipeline and we didn't write to a sink.
if len(status.Warnings) > 0 {
- retErr = multierror.Append(retErr, multierror.Append(errors.New("error during audit pipeline processing"), status.Warnings...))
- return retErr.ErrorOrNil()
+ return fmt.Errorf("error during audit pipeline processing: %w", errors.Join(status.Warnings...))
}
// Handle any additional audit that is required (Enterprise/CE dependant).
err = b.handleAdditionalAudit(auditContext, e)
if err != nil {
- retErr = multierror.Append(retErr, err)
+ return err
}
- return retErr.ErrorOrNil()
+ return nil
}
// LogResponse is used to ensure all the audit backends have an opportunity to
// log the given response and that *at least one* succeeds.
-func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret error) {
+func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (retErr error) {
b.RLock()
defer b.RUnlock()
@@ -347,18 +346,15 @@ func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret err
defer metrics.MeasureSince([]string{"audit", "log_response"}, time.Now())
defer func() {
metricVal := float32(0.0)
- if ret != nil {
+ if retErr != nil {
metricVal = 1.0
}
metrics.IncrCounter([]string{"audit", "log_response_failure"}, metricVal)
}()
- var retErr *multierror.Error
-
e, err := NewEvent(ResponseType)
if err != nil {
- retErr = multierror.Append(retErr, err)
- return retErr.ErrorOrNil()
+ return err
}
e.Data = in
@@ -377,8 +373,7 @@ func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret err
// cancelled context and refuse to process the nodes further.
ns, err := namespace.FromContext(ctx)
if err != nil {
- retErr = multierror.Append(retErr, fmt.Errorf("namespace missing from context: %w", err))
- return retErr.ErrorOrNil()
+ return fmt.Errorf("namespace missing from context: %w", err)
}
tempContext, auditCancel := context.WithTimeout(context.Background(), timeout)
@@ -390,29 +385,33 @@ func (b *Broker) LogResponse(ctx context.Context, in *logical.LogInput) (ret err
if hasAuditPipelines(b.broker) {
status, err = b.broker.Send(auditContext, event.AuditType.AsEventType(), e)
if err != nil {
- retErr = multierror.Append(retErr, multierror.Append(err, status.Warnings...))
- return retErr.ErrorOrNil()
+ return fmt.Errorf("%w: %w", err, errors.Join(status.Warnings...))
}
}
// Audit event ended up in at least 1 sink.
if len(status.CompleteSinks()) > 0 {
- return retErr.ErrorOrNil()
+ // We should log warnings to the operational logs regardless of whether
+ // we consider the overall auditing attempt to be successful.
+ if len(status.Warnings) > 0 {
+ b.logger.Error("log response underlying pipeline error(s)", "error", errors.Join(status.Warnings...))
+ }
+
+ return nil
}
// There were errors from inside the pipeline and we didn't write to a sink.
if len(status.Warnings) > 0 {
- retErr = multierror.Append(retErr, multierror.Append(errors.New("error during audit pipeline processing"), status.Warnings...))
- return retErr.ErrorOrNil()
+ return fmt.Errorf("error during audit pipeline processing: %w", errors.Join(status.Warnings...))
}
// Handle any additional audit that is required (Enterprise/CE dependant).
err = b.handleAdditionalAudit(auditContext, e)
if err != nil {
- retErr = multierror.Append(retErr, err)
+ return err
}
- return retErr.ErrorOrNil()
+ return nil
}
func (b *Broker) Invalidate(ctx context.Context, _ string) {
diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go
index a1b96ad392f1..8cfa8535cb5f 100644
--- a/builtin/logical/database/backend_test.go
+++ b/builtin/logical/database/backend_test.go
@@ -359,7 +359,7 @@ func TestBackend_BadConnectionString(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, _ := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, _ := postgreshelper.PrepareTestContainer(t)
defer cleanup()
respCheck := func(req *logical.Request) {
@@ -410,7 +410,7 @@ func TestBackend_basic(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// Configure a connection
@@ -665,7 +665,7 @@ func TestBackend_connectionCrud(t *testing.T) {
dbFactory.sys = sys
client := cluster.Cores[0].Client.Logical()
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// Mount the database plugin.
@@ -872,7 +872,7 @@ func TestBackend_roleCrud(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// Configure a connection
@@ -1121,7 +1121,7 @@ func TestBackend_allowedRoles(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// Configure a connection
@@ -1318,7 +1318,7 @@ func TestBackend_RotateRootCredentials(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}")
diff --git a/builtin/logical/database/path_roles_test.go b/builtin/logical/database/path_roles_test.go
index 91737da2cf8b..41a2e99758aa 100644
--- a/builtin/logical/database/path_roles_test.go
+++ b/builtin/logical/database/path_roles_test.go
@@ -222,7 +222,7 @@ func TestBackend_StaticRole_Config(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -487,7 +487,7 @@ func TestBackend_StaticRole_ReadCreds(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -667,7 +667,7 @@ func TestBackend_StaticRole_Updates(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -966,7 +966,7 @@ func TestBackend_StaticRole_Role_name_check(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
diff --git a/builtin/logical/database/rollback_test.go b/builtin/logical/database/rollback_test.go
index f60491a6662c..47c768374296 100644
--- a/builtin/logical/database/rollback_test.go
+++ b/builtin/logical/database/rollback_test.go
@@ -44,7 +44,7 @@ func TestBackend_RotateRootCredentials_WAL_rollback(t *testing.T) {
}
defer lb.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}")
@@ -183,7 +183,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_1(t *testing.T) {
}
defer lb.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}")
@@ -291,7 +291,7 @@ func TestBackend_RotateRootCredentials_WAL_no_rollback_2(t *testing.T) {
}
defer lb.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}")
diff --git a/builtin/logical/database/rotation_test.go b/builtin/logical/database/rotation_test.go
index c9917cb37458..99fc3ddf004b 100644
--- a/builtin/logical/database/rotation_test.go
+++ b/builtin/logical/database/rotation_test.go
@@ -63,7 +63,7 @@ func TestBackend_StaticRole_Rotation_basic(t *testing.T) {
b.schedule = &TestSchedule{}
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -274,7 +274,7 @@ func TestBackend_StaticRole_Rotation_Schedule_ErrorRecover(t *testing.T) {
b.schedule = &TestSchedule{}
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
t.Cleanup(cleanup)
// create the database user
@@ -458,7 +458,7 @@ func TestBackend_StaticRole_Rotation_NonStaticError(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -562,7 +562,7 @@ func TestBackend_StaticRole_Rotation_Revoke_user(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -783,7 +783,7 @@ func TestBackend_StaticRole_Rotation_QueueWAL_discard_role_newer_rotation_date(t
t.Fatal("could not convert to db backend")
}
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
@@ -952,7 +952,7 @@ func assertWALCount(t *testing.T, s logical.Storage, expected int, key string) {
type userCreator func(t *testing.T, username, password string)
func TestBackend_StaticRole_Rotation_PostgreSQL(t *testing.T) {
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
uc := userCreator(func(t *testing.T, username, password string) {
createTestPGUser(t, connURL, username, password, testRoleStaticCreate)
@@ -1246,7 +1246,7 @@ func TestBackend_StaticRole_Rotation_LockRegression(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// Configure a connection
@@ -1325,7 +1325,7 @@ func TestBackend_StaticRole_Rotation_Invalid_Role(t *testing.T) {
}
defer b.Cleanup(context.Background())
- cleanup, connURL := postgreshelper.PrepareTestContainer(t, "")
+ cleanup, connURL := postgreshelper.PrepareTestContainer(t)
defer cleanup()
// create the database user
diff --git a/builtin/logical/database/secret_creds.go b/builtin/logical/database/secret_creds.go
index e2130c1cf7da..b485f7ca46bb 100644
--- a/builtin/logical/database/secret_creds.go
+++ b/builtin/logical/database/secret_creds.go
@@ -34,6 +34,9 @@ func (b *databaseBackend) secretCredsRenew() framework.OperationFunc {
return nil, fmt.Errorf("secret is missing username internal data")
}
username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("username not a string")
+ }
roleNameRaw, ok := req.Secret.InternalData["role"]
if !ok {
@@ -98,6 +101,9 @@ func (b *databaseBackend) secretCredsRevoke() framework.OperationFunc {
return nil, fmt.Errorf("secret is missing username internal data")
}
username, ok := usernameRaw.(string)
+ if !ok {
+ return nil, fmt.Errorf("username not a string")
+ }
var resp *logical.Response
diff --git a/changelog/27750.txt b/changelog/27750.txt
new file mode 100644
index 000000000000..04c24fe59e7f
--- /dev/null
+++ b/changelog/27750.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+core/identity: Fixed an issue where deleted/reassigned entity-aliases were not removed from in-memory database.
+```
diff --git a/changelog/27790.txt b/changelog/27790.txt
new file mode 100644
index 000000000000..1475d0831a2b
--- /dev/null
+++ b/changelog/27790.txt
@@ -0,0 +1,3 @@
+```release-note:change
+activity (enterprise): filter all fields in client count responses by the request namespace
+```
\ No newline at end of file
diff --git a/changelog/27796.txt b/changelog/27796.txt
new file mode 100644
index 000000000000..7a1e7ebac3b0
--- /dev/null
+++ b/changelog/27796.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+ui: simplify the date range editing experience in the client counts dashboard.
+```
\ No newline at end of file
diff --git a/changelog/27809.txt b/changelog/27809.txt
new file mode 100644
index 000000000000..332c9155d95a
--- /dev/null
+++ b/changelog/27809.txt
@@ -0,0 +1,3 @@
+```release-note:improvement
+audit: Ensure that any underlying errors from audit devices are logged even if we consider auditing to be a success.
+```
\ No newline at end of file
diff --git a/changelog/27830.txt b/changelog/27830.txt
new file mode 100644
index 000000000000..6a3d7e3041f7
--- /dev/null
+++ b/changelog/27830.txt
@@ -0,0 +1,3 @@
+```release-note:change
+activity (enterprise): remove deprecated fields distinct_entities and non_entity_tokens
+```
\ No newline at end of file
diff --git a/changelog/27846.txt b/changelog/27846.txt
new file mode 100644
index 000000000000..50cba99062fb
--- /dev/null
+++ b/changelog/27846.txt
@@ -0,0 +1,7 @@
+```release-note:change
+activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) now requires the `sudo` ACL capability.
+```
+
+```release-note:improvement
+activity: The [activity export API](https://developer.hashicorp.com/vault/api-docs/system/internal-counters#activity-export) can now be called in non-root namespaces. Resulting records will be filtered to include the requested namespace (via `X-Vault-Namespace` header or within the path) and all child namespaces.
+```
diff --git a/changelog/27859.txt b/changelog/27859.txt
new file mode 100644
index 000000000000..d6836641fae7
--- /dev/null
+++ b/changelog/27859.txt
@@ -0,0 +1,4 @@
+```release-note:improvement
+audit: sinks (file, socket, syslog) will attempt to log errors to the server operational
+log before returning (if there are errors to log, and the context is done).
+```
diff --git a/command/agentproxyshared/auth/auth.go b/command/agentproxyshared/auth/auth.go
index 96f5026cbb92..91e189ed2604 100644
--- a/command/agentproxyshared/auth/auth.go
+++ b/command/agentproxyshared/auth/auth.go
@@ -313,10 +313,11 @@ func (ah *AuthHandler) Run(ctx context.Context, am AuthMethod) error {
isTokenFileMethod = path == "auth/token/lookup-self"
if isTokenFileMethod {
token, _ := data["token"].(string)
- lookupSelfClient, err := clientToUse.CloneWithHeaders()
- if err != nil {
+ // The error is called clientErr so as not to shadow the other err above it.
+ lookupSelfClient, clientErr := clientToUse.CloneWithHeaders()
+ if clientErr != nil {
ah.logger.Error("failed to clone client to perform token lookup")
- return err
+ return clientErr
}
lookupSelfClient.SetToken(token)
secret, err = lookupSelfClient.Auth().Token().LookupSelf()
diff --git a/command/operator_usage.go b/command/operator_usage.go
index 117b3b78e018..199c54103663 100644
--- a/command/operator_usage.go
+++ b/command/operator_usage.go
@@ -132,7 +132,7 @@ func (c *OperatorUsageCommand) Run(args []string) int {
c.outputTimestamps(resp.Data)
out := []string{
- "Namespace path | Distinct entities | Non-Entity tokens | Secret syncs | ACME clients | Active clients",
+ "Namespace path | Entity Clients | Non-Entity clients | Secret syncs | ACME clients | Active clients",
}
out = append(out, c.namespacesOutput(resp.Data)...)
@@ -233,14 +233,14 @@ func (c *OperatorUsageCommand) parseNamespaceCount(rawVal interface{}) (UsageRes
return ret, errors.New("missing counts")
}
- ret.entityCount, ok = jsonNumberOK(counts, "distinct_entities")
+ ret.entityCount, ok = jsonNumberOK(counts, "entity_clients")
if !ok {
- return ret, errors.New("missing distinct_entities")
+ return ret, errors.New("missing entity_clients")
}
- ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_tokens")
+ ret.tokenCount, ok = jsonNumberOK(counts, "non_entity_clients")
if !ok {
- return ret, errors.New("missing non_entity_tokens")
+ return ret, errors.New("missing non_entity_clients")
}
// don't error if the secret syncs key is missing
@@ -311,15 +311,15 @@ func (c *OperatorUsageCommand) totalOutput(data map[string]interface{}) []string
return out
}
- entityCount, ok := jsonNumberOK(total, "distinct_entities")
+ entityCount, ok := jsonNumberOK(total, "entity_clients")
if !ok {
- c.UI.Error("missing distinct_entities in total")
+ c.UI.Error("missing entity_clients in total")
return out
}
- tokenCount, ok := jsonNumberOK(total, "non_entity_tokens")
+ tokenCount, ok := jsonNumberOK(total, "non_entity_clients")
if !ok {
- c.UI.Error("missing non_entity_tokens in total")
+ c.UI.Error("missing non_entity_clients in total")
return out
}
// don't error if secret syncs key is missing
diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go
index fa4dd38fbe77..7501d4c8622a 100644
--- a/command/pki_reissue_intermediate.go
+++ b/command/pki_reissue_intermediate.go
@@ -113,6 +113,10 @@ func (c *PKIReIssueCACommand) Run(args []string) int {
}
templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef)
+ if err != nil {
+ c.UI.Error(fmt.Sprintf("Error parsing template certificate: %v", err))
+ return 1
+ }
data := updateTemplateWithData(templateData, userData)
return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data)
diff --git a/go.mod b/go.mod
index 1b89401067ee..a3239740a117 100644
--- a/go.mod
+++ b/go.mod
@@ -221,7 +221,7 @@ require (
golang.org/x/text v0.16.0
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d
google.golang.org/api v0.181.0
- google.golang.org/grpc v1.64.0
+ google.golang.org/grpc v1.64.1
google.golang.org/protobuf v1.34.1
gopkg.in/ory-am/dockertest.v3 v3.3.4
k8s.io/apimachinery v0.29.3
diff --git a/go.sum b/go.sum
index 521c41da882c..915c5400d50e 100644
--- a/go.sum
+++ b/go.sum
@@ -3016,8 +3016,8 @@ google.golang.org/grpc v1.52.3/go.mod h1:pu6fVzoFb+NBYNAvQL08ic+lvB2IojljRYuun5v
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
google.golang.org/grpc v1.54.0/go.mod h1:PUSEXI6iWghWaB6lXM4knEgpJNu2qUcKfDtNci3EC2g=
google.golang.org/grpc v1.56.3/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
diff --git a/helper/testhelpers/pluginhelpers/pluginhelpers.go b/helper/testhelpers/pluginhelpers/pluginhelpers.go
index 1a6643cae391..2d9c2355399d 100644
--- a/helper/testhelpers/pluginhelpers/pluginhelpers.go
+++ b/helper/testhelpers/pluginhelpers/pluginhelpers.go
@@ -76,15 +76,17 @@ func CompilePlugin(t testing.TB, typ consts.PluginType, pluginVersion string, pl
var pluginBytes []byte
dir := ""
- var err error
pluginRootDir := "builtin"
if typ == consts.PluginTypeDatabase {
pluginRootDir = "plugins"
}
for {
- dir, err = os.Getwd()
- if err != nil {
- t.Fatal(err)
+ // So that we can assign to dir without overshadowing the other
+ // err variables.
+ var getWdErr error
+ dir, getWdErr = os.Getwd()
+ if getWdErr != nil {
+ t.Fatal(getWdErr)
}
// detect if we are in a subdirectory or the root directory and compensate
if _, err := os.Stat(pluginRootDir); os.IsNotExist(err) {
@@ -128,15 +130,20 @@ func CompilePlugin(t testing.TB, typ consts.PluginType, pluginVersion string, pl
}
// write the cached plugin if necessary
- if _, err := os.Stat(pluginPath); os.IsNotExist(err) {
- err = os.WriteFile(pluginPath, pluginBytes, 0o755)
- }
- if err != nil {
- t.Fatal(err)
+ _, statErr := os.Stat(pluginPath)
+ if os.IsNotExist(statErr) {
+ err := os.WriteFile(pluginPath, pluginBytes, 0o755)
+ if err != nil {
+ t.Fatal(err)
+ }
+ } else {
+ if statErr != nil {
+ t.Fatal(statErr)
+ }
}
sha := sha256.New()
- _, err = sha.Write(pluginBytes)
+ _, err := sha.Write(pluginBytes)
if err != nil {
t.Fatal(err)
}
diff --git a/helper/testhelpers/postgresql/postgresqlhelper.go b/helper/testhelpers/postgresql/postgresqlhelper.go
index 7e5f25c626af..f0aa1203bdda 100644
--- a/helper/testhelpers/postgresql/postgresqlhelper.go
+++ b/helper/testhelpers/postgresql/postgresqlhelper.go
@@ -14,13 +14,29 @@ import (
"github.com/hashicorp/vault/sdk/helper/docker"
)
-func PrepareTestContainer(t *testing.T, version string) (func(), string) {
- env := []string{
- "POSTGRES_PASSWORD=secret",
- "POSTGRES_DB=database",
+const postgresVersion = "13.4-buster"
+
+func defaultRunOpts(t *testing.T) docker.RunOptions {
+ return docker.RunOptions{
+ ContainerName: "postgres",
+ ImageRepo: "docker.mirror.hashicorp.services/postgres",
+ ImageTag: postgresVersion,
+ Env: []string{
+ "POSTGRES_PASSWORD=secret",
+ "POSTGRES_DB=database",
+ },
+ Ports: []string{"5432/tcp"},
+ DoNotAutoRemove: false,
+ LogConsumer: func(s string) {
+ if t.Failed() {
+ t.Logf("container logs: %s", s)
+ }
+ },
}
+}
- _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env)
+func PrepareTestContainer(t *testing.T) (func(), string) {
+ _, cleanup, url, _ := prepareTestContainer(t, defaultRunOpts(t), "secret", true, false)
return cleanup, url
}
@@ -28,64 +44,82 @@ func PrepareTestContainer(t *testing.T, version string) (func(), string) {
// PrepareTestContainerWithVaultUser will setup a test container with a Vault
// admin user configured so that we can safely call rotate-root without
// rotating the root DB credentials
-func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context, version string) (func(), string) {
- env := []string{
- "POSTGRES_PASSWORD=secret",
- "POSTGRES_DB=database",
+func PrepareTestContainerWithVaultUser(t *testing.T, ctx context.Context) (func(), string) {
+ runner, cleanup, url, id := prepareTestContainer(t, defaultRunOpts(t), "secret", true, false)
+
+ cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"}
+ _, err := runner.RunCmdInBackground(ctx, id, cmd)
+ if err != nil {
+ t.Fatalf("Could not run command (%v) in container: %v", cmd, err)
}
- runner, cleanup, url, id := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, "secret", true, false, false, env)
+ return cleanup, url
+}
+
+func PrepareTestContainerWithSSL(t *testing.T, ctx context.Context, version string) (func(), string) {
+ runOpts := defaultRunOpts(t)
+ runOpts.Cmd = []string{"-c", "log_statement=all"}
+ runner, cleanup, url, id := prepareTestContainer(t, runOpts, "secret", true, false)
+
+ content := "echo 'hostssl all all all cert clientcert=verify-ca' > /var/lib/postgresql/data/pg_hba.conf"
+ // Copy the ssl init script into the newly running container.
+ buildCtx := docker.NewBuildContext()
+ buildCtx["ssl-conf.sh"] = docker.PathContentsFromBytes([]byte(content))
+ if err := runner.CopyTo(id, "/usr/local/bin", buildCtx); err != nil {
+ t.Fatalf("Could not copy ssl init script into container: %v", err)
+ }
- cmd := []string{"psql", "-U", "postgres", "-c", "CREATE USER vaultadmin WITH LOGIN PASSWORD 'vaultpass' SUPERUSER"}
+ // run the ssl init script to overwrite the pg_hba.conf file and set it to
+ // require SSL for each connection
+ cmd := []string{"bash", "/usr/local/bin/ssl-conf.sh"}
_, err := runner.RunCmdInBackground(ctx, id, cmd)
if err != nil {
t.Fatalf("Could not run command (%v) in container: %v", cmd, err)
}
+ // reload so the config changes take effect
+ cmd = []string{"psql", "-U", "postgres", "-c", "SELECT pg_reload_conf()"}
+ _, err = runner.RunCmdInBackground(ctx, id, cmd)
+ if err != nil {
+ t.Fatalf("Could not run command (%v) in container: %v", cmd, err)
+ }
+
return cleanup, url
}
func PrepareTestContainerWithPassword(t *testing.T, version, password string) (func(), string) {
- env := []string{
+ runOpts := defaultRunOpts(t)
+ runOpts.Env = []string{
"POSTGRES_PASSWORD=" + password,
"POSTGRES_DB=database",
}
- _, cleanup, url, _ := prepareTestContainer(t, "postgres", "docker.mirror.hashicorp.services/postgres", version, password, true, false, false, env)
+ _, cleanup, url, _ := prepareTestContainer(t, runOpts, password, true, false)
return cleanup, url
}
func PrepareTestContainerRepmgr(t *testing.T, name, version string, envVars []string) (*docker.Runner, func(), string, string) {
- env := append(envVars,
+ runOpts := defaultRunOpts(t)
+ runOpts.ImageRepo = "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr"
+ runOpts.ImageTag = version
+ runOpts.Env = append(envVars,
"REPMGR_PARTNER_NODES=psql-repl-node-0,psql-repl-node-1",
"REPMGR_PRIMARY_HOST=psql-repl-node-0",
"REPMGR_PASSWORD=repmgrpass",
"POSTGRESQL_PASSWORD=secret")
+ runOpts.DoNotAutoRemove = true
- return prepareTestContainer(t, name, "docker.mirror.hashicorp.services/bitnami/postgresql-repmgr", version, "secret", false, true, true, env)
+ return prepareTestContainer(t, runOpts, "secret", false, true)
}
-func prepareTestContainer(t *testing.T, name, repo, version, password string,
- addSuffix, forceLocalAddr, doNotAutoRemove bool, envVars []string,
+func prepareTestContainer(t *testing.T, runOpts docker.RunOptions, password string, addSuffix, forceLocalAddr bool,
) (*docker.Runner, func(), string, string) {
if os.Getenv("PG_URL") != "" {
return nil, func() {}, "", os.Getenv("PG_URL")
}
- if version == "" {
- version = "11"
- }
-
- runOpts := docker.RunOptions{
- ContainerName: name,
- ImageRepo: repo,
- ImageTag: version,
- Env: envVars,
- Ports: []string{"5432/tcp"},
- DoNotAutoRemove: doNotAutoRemove,
- }
- if repo == "bitnami/postgresql-repmgr" {
+ if runOpts.ImageRepo == "bitnami/postgresql-repmgr" {
runOpts.NetworkID = os.Getenv("POSTGRES_MULTIHOST_NET")
}
@@ -94,7 +128,7 @@ func prepareTestContainer(t *testing.T, name, repo, version, password string,
t.Fatalf("Could not start docker Postgres: %s", err)
}
- svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, repo))
+ svc, containerID, err := runner.StartNewService(context.Background(), addSuffix, forceLocalAddr, connectPostgres(password, runOpts.ImageRepo))
if err != nil {
t.Fatalf("Could not start docker Postgres: %s", err)
}
diff --git a/internal/observability/event/options.go b/internal/observability/event/options.go
index 62fb4265954e..7e419d559516 100644
--- a/internal/observability/event/options.go
+++ b/internal/observability/event/options.go
@@ -6,10 +6,12 @@ package event
import (
"fmt"
"os"
+ "reflect"
"strconv"
"strings"
"time"
+ "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/go-uuid"
)
@@ -26,6 +28,7 @@ type options struct {
withSocketType string
withMaxDuration time.Duration
withFileMode *os.FileMode
+ withLogger hclog.Logger
}
// getDefaultOptions returns Options with their default values.
@@ -201,3 +204,15 @@ func WithFileMode(mode string) Option {
return nil
}
}
+
+// WithLogger provides an Option to supply a logger which will be used to write logs.
+// NOTE: If no logger is supplied then logging may not be possible.
+func WithLogger(l hclog.Logger) Option {
+ return func(o *options) error {
+ if l != nil && !reflect.ValueOf(l).IsNil() {
+ o.withLogger = l
+ }
+
+ return nil
+ }
+}
diff --git a/internal/observability/event/options_test.go b/internal/observability/event/options_test.go
index a3e47a2c487c..2b6a1fe3ae8f 100644
--- a/internal/observability/event/options_test.go
+++ b/internal/observability/event/options_test.go
@@ -8,6 +8,7 @@ import (
"testing"
"time"
+ "github.com/hashicorp/go-hclog"
"github.com/stretchr/testify/require"
)
@@ -423,3 +424,37 @@ func TestOptions_WithFileMode(t *testing.T) {
})
}
}
+
+// TestOptions_WithLogger exercises WithLogger Option to ensure it performs as expected.
+func TestOptions_WithLogger(t *testing.T) {
+ t.Parallel()
+
+ tests := map[string]struct {
+ value hclog.Logger
+ isNilExpected bool
+ }{
+ "nil-pointer": {
+ value: nil,
+ isNilExpected: true,
+ },
+ "logger": {
+ value: hclog.NewNullLogger(),
+ },
+ }
+
+ for name, tc := range tests {
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ opts := &options{}
+ applyOption := WithLogger(tc.value)
+ err := applyOption(opts)
+ require.NoError(t, err)
+ if tc.isNilExpected {
+ require.Nil(t, opts.withLogger)
+ } else {
+ require.NotNil(t, opts.withLogger)
+ }
+ })
+ }
+}
diff --git a/internal/observability/event/sink_file.go b/internal/observability/event/sink_file.go
index 0f5e22e4c8de..ea2047e9eb73 100644
--- a/internal/observability/event/sink_file.go
+++ b/internal/observability/event/sink_file.go
@@ -14,6 +14,7 @@ import (
"sync"
"github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-hclog"
)
// defaultFileMode is the default file permissions (read/write for everyone).
@@ -31,6 +32,7 @@ type FileSink struct {
fileMode os.FileMode
path string
requiredFormat string
+ logger hclog.Logger
}
// NewFileSink should be used to create a new FileSink.
@@ -69,6 +71,7 @@ func NewFileSink(path string, format string, opt ...Option) (*FileSink, error) {
fileMode: mode,
requiredFormat: format,
path: p,
+ logger: opts.withLogger,
}
// Ensure that the file can be successfully opened for writing;
@@ -82,13 +85,22 @@ func NewFileSink(path string, format string, opt ...Option) (*FileSink, error) {
}
// Process handles writing the event to the file sink.
-func (s *FileSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+func (s *FileSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
+ defer func() {
+ // If the context is errored (cancelled), and we were planning to return
+ // an error, let's also log (if we have a logger) in case the eventlogger's
+ // status channel and errors propagated.
+ if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil {
+ s.logger.Error("file sink error", "context", err, "error", retErr)
+ }
+ }()
+
if e == nil {
return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
}
diff --git a/internal/observability/event/sink_socket.go b/internal/observability/event/sink_socket.go
index 7d7502306086..0761a46be886 100644
--- a/internal/observability/event/sink_socket.go
+++ b/internal/observability/event/sink_socket.go
@@ -12,6 +12,7 @@ import (
"time"
"github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-multierror"
)
@@ -25,6 +26,7 @@ type SocketSink struct {
maxDuration time.Duration
socketLock sync.RWMutex
connection net.Conn
+ logger hclog.Logger
}
// NewSocketSink should be used to create a new SocketSink.
@@ -52,21 +54,28 @@ func NewSocketSink(address string, format string, opt ...Option) (*SocketSink, e
maxDuration: opts.withMaxDuration,
socketLock: sync.RWMutex{},
connection: nil,
+ logger: opts.withLogger,
}
return sink, nil
}
// Process handles writing the event to the socket.
-func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
- s.socketLock.Lock()
- defer s.socketLock.Unlock()
+ defer func() {
+ // If the context is errored (cancelled), and we were planning to return
+ // an error, let's also log (if we have a logger) in case the eventlogger's
+ // status channel and errors propagated.
+ if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil {
+ s.logger.Error("socket sink error", "context", err, "error", retErr)
+ }
+ }()
if e == nil {
return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
@@ -77,6 +86,9 @@ func (s *SocketSink) Process(ctx context.Context, e *eventlogger.Event) (*eventl
return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter)
}
+ s.socketLock.Lock()
+ defer s.socketLock.Unlock()
+
// Try writing and return early if successful.
err := s.write(ctx, formatted)
if err == nil {
diff --git a/internal/observability/event/sink_syslog.go b/internal/observability/event/sink_syslog.go
index 6d6b6b6aee2f..147b87089034 100644
--- a/internal/observability/event/sink_syslog.go
+++ b/internal/observability/event/sink_syslog.go
@@ -9,6 +9,7 @@ import (
"strings"
"github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-hclog"
gsyslog "github.com/hashicorp/go-syslog"
)
@@ -17,7 +18,8 @@ var _ eventlogger.Node = (*SyslogSink)(nil)
// SyslogSink is a sink node which handles writing events to syslog.
type SyslogSink struct {
requiredFormat string
- logger gsyslog.Syslogger
+ syslogger gsyslog.Syslogger
+ logger hclog.Logger
}
// NewSyslogSink should be used to create a new SyslogSink.
@@ -38,17 +40,32 @@ func NewSyslogSink(format string, opt ...Option) (*SyslogSink, error) {
return nil, fmt.Errorf("error creating syslogger: %w", err)
}
- return &SyslogSink{requiredFormat: format, logger: logger}, nil
+ syslog := &SyslogSink{
+ requiredFormat: format,
+ syslogger: logger,
+ logger: opts.withLogger,
+ }
+
+ return syslog, nil
}
// Process handles writing the event to the syslog.
-func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (*eventlogger.Event, error) {
+func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (_ *eventlogger.Event, retErr error) {
select {
case <-ctx.Done():
return nil, ctx.Err()
default:
}
+ defer func() {
+ // If the context is errored (cancelled), and we were planning to return
+ // an error, let's also log (if we have a logger) in case the eventlogger's
+ // status channel and errors propagated.
+ if err := ctx.Err(); err != nil && retErr != nil && s.logger != nil {
+ s.logger.Error("syslog sink error", "context", err, "error", retErr)
+ }
+ }()
+
if e == nil {
return nil, fmt.Errorf("event is nil: %w", ErrInvalidParameter)
}
@@ -58,7 +75,7 @@ func (s *SyslogSink) Process(ctx context.Context, e *eventlogger.Event) (*eventl
return nil, fmt.Errorf("unable to retrieve event formatted as %q: %w", s.requiredFormat, ErrInvalidParameter)
}
- _, err := s.logger.Write(formatted)
+ _, err := s.syslogger.Write(formatted)
if err != nil {
return nil, fmt.Errorf("error writing to syslog: %w", err)
}
diff --git a/physical/postgresql/postgresql_test.go b/physical/postgresql/postgresql_test.go
index 301fc15ec263..0dc0ce948602 100644
--- a/physical/postgresql/postgresql_test.go
+++ b/physical/postgresql/postgresql_test.go
@@ -22,7 +22,7 @@ func TestPostgreSQLBackend(t *testing.T) {
// Use docker as pg backend if no url is provided via environment variables
connURL := os.Getenv("PGURL")
if connURL == "" {
- cleanup, u := postgresql.PrepareTestContainer(t, "11.1")
+ cleanup, u := postgresql.PrepareTestContainer(t)
defer cleanup()
connURL = u
}
diff --git a/plugins/database/postgresql/postgresql_test.go b/plugins/database/postgresql/postgresql_test.go
index 90184e10a3eb..23b04788bbfb 100644
--- a/plugins/database/postgresql/postgresql_test.go
+++ b/plugins/database/postgresql/postgresql_test.go
@@ -24,7 +24,7 @@ import (
)
func getPostgreSQL(t *testing.T, options map[string]interface{}) (*PostgreSQL, func()) {
- cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgresql.PrepareTestContainer(t)
connectionDetails := map[string]interface{}{
"connection_url": connURL,
@@ -70,7 +70,7 @@ func TestPostgreSQL_InitializeWithStringVals(t *testing.T) {
}
func TestPostgreSQL_Initialize_ConnURLWithDSNFormat(t *testing.T) {
- cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgresql.PrepareTestContainer(t)
defer cleanup()
dsnConnURL, err := dbutil.ParseURL(connURL)
@@ -185,7 +185,7 @@ func TestPostgreSQL_Initialize_CloudGCP(t *testing.T) {
// TestPostgreSQL_PasswordAuthentication tests that the default "password_authentication" is "none", and that
// an error is returned if an invalid "password_authentication" is provided.
func TestPostgreSQL_PasswordAuthentication(t *testing.T) {
- cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgresql.PrepareTestContainer(t)
defer cleanup()
dsnConnURL, err := dbutil.ParseURL(connURL)
@@ -227,7 +227,7 @@ func TestPostgreSQL_PasswordAuthentication(t *testing.T) {
// TestPostgreSQL_PasswordAuthentication_SCRAMSHA256 tests that password_authentication works when set to scram-sha-256.
// When sending an encrypted password, the raw password should still successfully authenticate the user.
func TestPostgreSQL_PasswordAuthentication_SCRAMSHA256(t *testing.T) {
- cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgresql.PrepareTestContainer(t)
defer cleanup()
dsnConnURL, err := dbutil.ParseURL(connURL)
@@ -1092,7 +1092,7 @@ func TestUsernameGeneration(t *testing.T) {
}
func TestNewUser_CustomUsername(t *testing.T) {
- cleanup, connURL := postgresql.PrepareTestContainer(t, "13.4-buster")
+ cleanup, connURL := postgresql.PrepareTestContainer(t)
defer cleanup()
type testCase struct {
diff --git a/ui/app/components/calendar-widget.js b/ui/app/components/calendar-widget.js
deleted file mode 100644
index 804c9af1963a..000000000000
--- a/ui/app/components/calendar-widget.js
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * Copyright (c) HashiCorp, Inc.
- * SPDX-License-Identifier: BUSL-1.1
- */
-
-import Component from '@glimmer/component';
-import { action } from '@ember/object';
-import { tracked } from '@glimmer/tracking';
-import { ARRAY_OF_MONTHS, parseAPITimestamp } from 'core/utils/date-formatters';
-import { addYears, isSameYear, subYears } from 'date-fns';
-import timestamp from 'core/utils/timestamp';
-/**
- * @module CalendarWidget
- * CalendarWidget component is used in the client counts dashboard to select a month/year to query the /activity endpoint.
- * The component returns an object with selected date info, example: { dateType: 'endDate', monthIdx: 0, monthName: 'January', year: 2022 }
- *
- * @example
- * ```js
- *
{{this.formattedDate @startTime}}
+—
+{{this.formattedDate @endTime}}
++ The start date will be used as the client counting start time and all clients in that month will be considered new. + {{#if this.version.isEnterprise}} + We recommend setting this date as your license or billing start date to get the most accurate new and total + client count estimations. These dates are only for querying data in storage. Editing the date range does not + change any license or billing configurations. + {{/if}} +
+{{this.formattedStartDate}}
-- {{this.versionText.description}} -
-{{this.versionText.label}}
-