OSS changes for new update-primary API endpoint #20277

Merged: 3 commits, Apr 20, 2023

Changes from 1 commit
4 changes: 4 additions & 0 deletions sdk/helper/consts/consts.go
@@ -39,4 +39,8 @@ const (
 	VaultEnableFilePermissionsCheckEnv = "VAULT_ENABLE_FILE_PERMISSIONS_CHECK"

 	VaultDisableUserLockout = "VAULT_DISABLE_USER_LOCKOUT"
+
+	PerformanceReplicationPathTarget = "performance"
+
+	DRReplicationPathParget = "dr"
 )
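The two new constants name the URL path segment for each replication type (`DRReplicationPathParget` is spelled here as it appears in the change). A hedged sketch of the kind of call site they support; `buildUpdatePrimaryPath` is a hypothetical helper, and the endpoint shape is inferred from the PR title rather than from this diff:

```go
// Minimal sketch (not part of this PR) of how these path-target
// constants are typically consumed: choosing the replication-type
// segment when building a sys/replication/... request path.
package main

import (
	"fmt"

	"github.com/hashicorp/vault/sdk/helper/consts"
)

// buildUpdatePrimaryPath is hypothetical; the real endpoint wiring
// presumably lives outside the OSS tree.
func buildUpdatePrimaryPath(pathTarget string) string {
	return fmt.Sprintf("sys/replication/%s/secondary/update-primary", pathTarget)
}

func main() {
	fmt.Println(buildUpdatePrimaryPath(consts.PerformanceReplicationPathTarget))
	// sys/replication/performance/secondary/update-primary
	fmt.Println(buildUpdatePrimaryPath(consts.DRReplicationPathParget))
	// sys/replication/dr/secondary/update-primary
}
```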
14 changes: 10 additions & 4 deletions vault/core.go
@@ -60,6 +60,7 @@ import (
 	"github.com/hashicorp/vault/vault/cluster"
 	"github.com/hashicorp/vault/vault/eventbus"
 	"github.com/hashicorp/vault/vault/quotas"
+	"github.com/hashicorp/vault/vault/replication"
 	vaultseal "github.com/hashicorp/vault/vault/seal"
 	"github.com/hashicorp/vault/version"
 	"github.com/patrickmn/go-cache"
@@ -519,9 +520,9 @@ type Core struct {

 	// The active set of upstream cluster addresses; stored via the Echo
 	// mechanism, loaded by the balancer
-	atomicPrimaryClusterAddrs *atomic.Value
+	atomicPrimaryClusterAddrs *atomic.Pointer[replication.Primaries]

-	atomicPrimaryFailoverAddrs *atomic.Value
+	atomicPrimaryFailoverAddrs *atomic.Pointer[replication.Primaries]

 	// replicationState keeps the current replication state cached for quick
 	// lookup; activeNodeReplicationState stores the active value on standbys
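Swapping `atomic.Value` for the generic `atomic.Pointer[T]` (added in Go 1.19) trades runtime type assertions for compile-time checking. A standalone sketch of the difference, using a stand-in for `replication.Primaries`:

```go
// Self-contained illustration (independent of Vault) of why the PR
// prefers atomic.Pointer[T]: loads come back as *Primaries directly,
// and storing a value of the wrong type fails to compile.
package main

import (
	"fmt"
	"sync/atomic"
)

// Primaries stands in for replication.Primaries here.
type Primaries struct {
	Addrs []string
}

func main() {
	// Old style: interface-typed, checked only at runtime.
	var v atomic.Value
	v.Store(&Primaries{Addrs: []string{"https://primary-1:8201"}})
	p := v.Load().(*Primaries) // panics if the stored type ever drifts

	// New style: the element type is part of the variable's type.
	var ptr atomic.Pointer[Primaries]
	ptr.Store(&Primaries{Addrs: []string{"https://primary-1:8201"}})
	q := ptr.Load() // *Primaries with no assertion; nil before first Store

	fmt.Println(p.Addrs, q.Addrs)
}
```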
@@ -704,6 +705,8 @@ type Core struct {

 	// if populated, override the default gRPC min connect timeout (currently 20s in grpc 1.51)
 	grpcMinConnectTimeout time.Duration
+
+	synchronousMerkleClean bool
 }

 // c.stateLock needs to be held in read mode before calling this function.
@@ -861,6 +864,8 @@ type CoreConfig struct {
 	PendingRemovalMountsAllowed bool

 	ExpirationRevokeRetryBase time.Duration
+
+	SynchronousMerkleClean bool
 }

 // GetServiceRegistration returns the config's ServiceRegistration, or nil if it does
@@ -990,8 +995,8 @@ func CreateCore(conf *CoreConfig) (*Core, error) {
 		introspectionEnabled:       conf.EnableIntrospection,
 		shutdownDoneCh:             new(atomic.Value),
 		replicationState:           new(uint32),
-		atomicPrimaryClusterAddrs:  new(atomic.Value),
-		atomicPrimaryFailoverAddrs: new(atomic.Value),
+		atomicPrimaryClusterAddrs:  new(atomic.Pointer[replication.Primaries]),
+		atomicPrimaryFailoverAddrs: new(atomic.Pointer[replication.Primaries]),
 		localClusterPrivateKey:     new(atomic.Value),
 		localClusterCert:           new(atomic.Value),
 		localClusterParsedCert:     new(atomic.Value),
@@ -1027,6 +1032,7 @@ func CreateCore(conf *CoreConfig) (*Core, error) {
 		experiments:                 conf.Experiments,
 		pendingRemovalMountsAllowed: conf.PendingRemovalMountsAllowed,
 		expirationRevokeRetryBase:   conf.ExpirationRevokeRetryBase,
+		synchronousMerkleClean:      conf.SynchronousMerkleClean,
 	}

 	c.standbyStopCh.Store(make(chan struct{}))
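The `SynchronousMerkleClean` knob follows the plumbing pattern visible across these hunks: an exported field on `CoreConfig`, copied onto an unexported `Core` field in `CreateCore`. A stripped-down sketch of that pattern (what the flag actually gates is not visible in this diff):

```go
// Illustration of the CoreConfig -> Core plumbing this hunk follows;
// all Vault-specific detail omitted.
package main

import "fmt"

type CoreConfig struct {
	// Exported: set by callers (server setup, tests).
	SynchronousMerkleClean bool
}

type Core struct {
	// Unexported: read by the rest of the package after construction.
	synchronousMerkleClean bool
}

func CreateCore(conf *CoreConfig) *Core {
	return &Core{synchronousMerkleClean: conf.SynchronousMerkleClean}
}

func main() {
	c := CreateCore(&CoreConfig{SynchronousMerkleClean: true})
	fmt.Println(c.synchronousMerkleClean) // true
}
```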
8 changes: 8 additions & 0 deletions vault/testing.go
@@ -971,6 +971,10 @@ func (c *TestClusterCore) Seal(t testing.T) {
 	}
 }

+func (c *TestClusterCore) LogicalStorage() logical.Storage {
+	return c.barrier
+}
+
 func (c *TestClusterCore) stop() error {
 	c.Logger().Info("stopping vault test core")

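`LogicalStorage` exposes a test core's barrier (its encrypted `logical.Storage` view) to packages outside `vault`. A hedged sketch of a test using it to read a raw entry without going through the HTTP API; the storage path and bare cluster options are illustrative:

```go
// Hedged sketch: exercising the new accessor from an external test
// package. The storage path is illustrative, not guaranteed to exist.
package vault_test

import (
	"context"
	"testing"

	"github.com/hashicorp/vault/vault"
)

func TestBarrierEntryVisible(t *testing.T) {
	cluster := vault.NewTestCluster(t, nil, nil)
	cluster.Start()
	defer cluster.Cleanup()

	core := cluster.Cores[0]

	// Read straight from the barrier, bypassing the request path.
	entry, err := core.LogicalStorage().Get(context.Background(), "core/cluster/local/info")
	if err != nil {
		t.Fatal(err)
	}
	if entry == nil {
		t.Fatal("expected an entry in barrier storage")
	}
}
```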
@@ -1025,6 +1029,10 @@ func (c *TestClusterCore) TLSConfig() *tls.Config {
 	return c.tlsConfig.Clone()
 }

+func (c *TestClusterCore) ClusterListener() *cluster.Listener {
+	return c.getClusterListener()
+}
+
 func (c *TestCluster) Cleanup() {
 	c.Logger.Info("cleaning up vault cluster")
 	if tl, ok := c.Logger.(*corehelpers.TestLogger); ok {
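`ClusterListener` likewise surfaces the core's cluster-port listener (the port Vault uses for request forwarding and replication traffic) to external test code. A small hedged sketch; treat the nil handling as an assumption about listener lifecycle:

```go
// Hedged sketch: a test helper that reports where a core's cluster
// port is listening. Assumes the listener may be nil before the core
// is fully up.
package vault_test

import (
	"github.com/hashicorp/vault/vault"
)

func clusterPortAddr(core *vault.TestClusterCore) string {
	ln := core.ClusterListener()
	if ln == nil {
		return "" // no cluster listener running yet
	}
	return ln.Addr().String()
}
```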