From 4adec84e82a8929f2a5737240ec95a69af76ed86 Mon Sep 17 00:00:00 2001 From: Sven Eigenbrodt Date: Mon, 13 Mar 2023 09:58:37 +0100 Subject: [PATCH] add MAX_COMPACTION_LEVELS option --- DOCUMENTATION.md | 4 ++++ internal/conf/config.go | 1 + internal/conf/environment.go | 1 + internal/server/store.go | 10 +++++++++- 4 files changed, 15 insertions(+), 1 deletion(-) diff --git a/DOCUMENTATION.md b/DOCUMENTATION.md index 5f5574c..7204680 100644 --- a/DOCUMENTATION.md +++ b/DOCUMENTATION.md @@ -1385,6 +1385,10 @@ You can tune the LOG_LEVEL of the Datahub. The supported values are DEBUG and IN The Datahub supports reporting metrics trough a StatsD server. This is turned off if left empty, and you can turn it on by giving it an ip-address and a port combination. +```MAX_COMPACTION_LEVELS``` + +Can be used to override Badger's default of 7 LSM levels. When disk space usage exceeds, or is expected to exceed, 1.1TB, 8 compaction levels are needed. + #### Securing the Data Hub There are four main security models for the data hub. 
diff --git a/internal/conf/config.go b/internal/conf/config.go index 1817d46..64dce86 100644 --- a/internal/conf/config.go +++ b/internal/conf/config.go @@ -80,6 +80,7 @@ func loadEnv(basePath *string, loadFromHome bool) (*Env, error) { FullsyncLeaseTimeout: viper.GetDuration("FULLSYNC_LEASE_TIMEOUT"), BlockCacheSize: viper.GetInt64("BLOCK_CACHE_SIZE"), ValueLogFileSize: viper.GetInt64("VALUE_LOG_FILE_SIZE"), + MaxCompactionLevels: viper.GetInt("MAX_COMPACTION_LEVELS"), AdminUserName: viper.GetString("ADMIN_USERNAME"), AdminPassword: viper.GetString("ADMIN_PASSWORD"), NodeId: viper.GetString("NODE_ID"), diff --git a/internal/conf/environment.go b/internal/conf/environment.go index f9e5b0c..36c0c37 100644 --- a/internal/conf/environment.go +++ b/internal/conf/environment.go @@ -36,6 +36,7 @@ type Env struct { FullsyncLeaseTimeout time.Duration BlockCacheSize int64 ValueLogFileSize int64 + MaxCompactionLevels int AdminUserName string AdminPassword string NodeId string diff --git a/internal/server/store.go b/internal/server/store.go index 21abfd2..7088aaa 100644 --- a/internal/server/store.go +++ b/internal/server/store.go @@ -59,6 +59,7 @@ type Store struct { fullsyncLeaseTimeout time.Duration blockCacheSize int64 valueLogFileSize int64 + maxCompactionLevels int } type BadgerLogger struct { // we use this to implement the Badger Logger interface @@ -86,6 +87,7 @@ func NewStore(lc fx.Lifecycle, env *conf.Env, statsdClient statsd.ClientInterfac fullsyncLeaseTimeout: fsTimeout, blockCacheSize: env.BlockCacheSize, valueLogFileSize: env.ValueLogFileSize, + maxCompactionLevels: env.MaxCompactionLevels, } store.NamespaceManager = NewNamespaceManager(store) @@ -342,7 +344,13 @@ func (s *Store) GetGlobalContext() *Context { func (s *Store) Open() error { s.logger.Info("Open database") opts := badger.DefaultOptions(s.storeLocation) - opts.MaxLevels = 8 // make badger accept data larger than 1.1TB + + if s.maxCompactionLevels > 0 { + // default is 7, set to 8 to make badger 
accept data larger than 1.1TB at the cost of larger compactions + opts.MaxLevels = s.maxCompactionLevels + } + s.logger.Infof("Max Compaction Levels: %v", opts.MaxLevels) + if s.blockCacheSize > 0 { opts.BlockCacheSize = s.blockCacheSize } else {