From 8000741e43fe09bfde7de0578808659780e94056 Mon Sep 17 00:00:00 2001 From: VM <112189277+sysvm@users.noreply.github.com> Date: Tue, 21 Feb 2023 18:54:53 +0800 Subject: [PATCH] fix: use env var to get bucket url (#130) * fix: use env var to get bucket url * fix: improve loading config from env function * fix: rename variable names * fix: change piece store readme.md --------- Co-authored-by: DylanYong --- Dockerfile | 2 +- config/config.toml | 8 +++---- docker-entrypoint.sh | 4 ---- model/const.go | 3 +++ service/stonenode/stone_node.go | 2 +- service/stonenode/stone_node_config.go | 4 ++-- service/syncer/syncer.go | 2 +- service/syncer/syncer_config.go | 4 ++-- store/piecestore/README.md | 31 +++++++++++--------------- store/piecestore/piece/piece_store.go | 19 +++++++++++----- store/piecestore/storage/s3.go | 28 ++++++++++++++++++----- store/store.go | 27 ++++++++++++++++++---- test/e2e/onebox/setup_onebox.go | 6 ++--- 13 files changed, 89 insertions(+), 51 deletions(-) delete mode 100644 docker-entrypoint.sh diff --git a/Dockerfile b/Dockerfile index 04dd15fae..1596ce730 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,7 +26,7 @@ ARG USER=sp ARG USER_UID=1000 ARG USER_GID=1000 -ENV PACKAGES libstdc++ ca-certificates bash curl tini +ENV PACKAGES libstdc++ ca-certificates bash curl ENV WORKDIR=/app RUN apk add --no-cache $PACKAGES \ diff --git a/config/config.toml b/config/config.toml index eb2905f49..8b61e4875 100644 --- a/config/config.toml +++ b/config/config.toml @@ -59,9 +59,9 @@ Service = [ StoneHubServiceAddress = "127.0.0.1:9333" SyncerServiceAddress = ["127.0.0.1:9543", "127.0.0.1:9553", "127.0.0.1:9563", "127.0.0.1:9573", "127.0.0.1:9583", "127.0.0.1:9593"] StoneJobLimit = 64 - [StoneNodeCfg.PieceConfig] + [StoneNodeCfg.PieceStoreConfig] Shards = 0 - [StoneNodeCfg.PieceConfig.Store] + [StoneNodeCfg.PieceStoreConfig.Store] Storage = "file" BucketURL = "./data/primary_payload_data" MaxRetries = 5 @@ -71,7 +71,7 @@ Service = [ StorageProvider = "gnfd-test-sp" Address = "127.0.0.1:9533" MetaDBType = "leveldb" - [SyncerCfg.PieceConfig.Store] + [SyncerCfg.PieceStoreConfig.Store] Storage = "file" BucketURL = "./data/secondary_payload_data" MaxRetries = 5 @@ -98,7 +98,7 @@ Service = [ [ChallengeCfg] StorageProvider = "gnfd-test-sp" - Address = "127.0.0.1:9633" + Address = "127.0.0.1:9733" MetaDBType = "leveldb" [ChallengeCfg.PieceStoreConfig] Shards = 0 diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh deleted file mode 100644 index 67c141556..000000000 --- a/docker-entrypoint.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -set -e - -exec "/app/gnfd-sp -config config.toml > gnfd-sp.log 2>&1 &" "$@" diff --git a/model/const.go b/model/const.go index 341e43c6a..9ce3361d1 100644 --- a/model/const.go +++ b/model/const.go @@ -97,6 +97,9 @@ const ( // environment constants const ( + // Piece Store constants + BucketURL = "BUCKET_URL" + // AWS environment constants AWSAccessKey = "AWS_ACCESS_KEY" AWSSecretKey = "AWS_SECRET_KEY" diff --git a/service/stonenode/stone_node.go b/service/stonenode/stone_node.go index a27bb25cd..71dc66c7b 100644 --- a/service/stonenode/stone_node.go +++ b/service/stonenode/stone_node.go @@ -48,7 +48,7 @@ func (node *StoneNodeService) initClient() error { if node.running.Load() { return merrors.ErrStoneNodeStarted } - store, err := client.NewStoreClient(node.cfg.PieceConfig) + store, err := client.NewStoreClient(node.cfg.PieceStoreConfig) if err != nil { log.Errorw("stone node inits piece store client failed", "error", err) return err diff --git 
a/service/stonenode/stone_node_config.go b/service/stonenode/stone_node_config.go index d62a661d4..905f67ee3 100644 --- a/service/stonenode/stone_node_config.go +++ b/service/stonenode/stone_node_config.go @@ -7,7 +7,7 @@ type StoneNodeConfig struct { StoneHubServiceAddress string SyncerServiceAddress []string StorageProvider string - PieceConfig *storage.PieceStoreConfig + PieceStoreConfig *storage.PieceStoreConfig StoneJobLimit int64 } @@ -16,6 +16,6 @@ var DefaultStoneNodeConfig = &StoneNodeConfig{ StoneHubServiceAddress: "127.0.0.1:9333", SyncerServiceAddress: []string{"127.0.0.1:9533", "127.0.0.1:9543", "127.0.0.1:9553", "127.0.0.1:9563", "127.0.0.1:9573", "127.0.0.1:9583"}, StorageProvider: "bnb-sp", - PieceConfig: storage.DefaultPieceStoreConfig, + PieceStoreConfig: storage.DefaultPieceStoreConfig, StoneJobLimit: 64, } diff --git a/service/syncer/syncer.go b/service/syncer/syncer.go index 487db0ad5..5571b5503 100644 --- a/service/syncer/syncer.go +++ b/service/syncer/syncer.go @@ -45,7 +45,7 @@ func NewSyncerService(config *SyncerConfig) (*Syncer, error) { // initClient func (s *Syncer) initClient() error { - store, err := client.NewStoreClient(s.config.PieceConfig) + store, err := client.NewStoreClient(s.config.PieceStoreConfig) if err != nil { log.Errorw("syncer starts piece store client failed", "error", err) return err diff --git a/service/syncer/syncer_config.go b/service/syncer/syncer_config.go index 75a538fd3..4eba50208 100644 --- a/service/syncer/syncer_config.go +++ b/service/syncer/syncer_config.go @@ -14,7 +14,7 @@ type SyncerConfig struct { MetaDBType string MetaLevelDBConfig *config.LevelDBConfig MetaSqlDBConfig *config.SqlDBConfig - PieceConfig *storage.PieceStoreConfig + PieceStoreConfig *storage.PieceStoreConfig } var DefaultSyncerConfig = &SyncerConfig{ @@ -23,5 +23,5 @@ var DefaultSyncerConfig = &SyncerConfig{ MetaDBType: model.LevelDB, MetaLevelDBConfig: metalevel.DefaultMetaLevelDBConfig, MetaSqlDBConfig: metasql.DefaultMetaSqlDBConfig, - PieceConfig: storage.DefaultPieceStoreConfig, + PieceStoreConfig: storage.DefaultPieceStoreConfig, } diff --git a/store/piecestore/README.md b/store/piecestore/README.md index 9b8f5ce79..135dc4403 100644 --- a/store/piecestore/README.md +++ b/store/piecestore/README.md @@ -45,30 +45,25 @@ Amazon S3 can refer this [link](https://docs.aws.amazon.com/IAM/latest/UserGuide The number of sharding in object storage that supports multi-bucket storage. -### Config example +## Config Note -- Path-style and AccessKey, SecretKey +For safety, the access key, secret key, and session token should be configured as environment variables: -```json -{ - "EndPoint": "https://s3.us-east-1.amazonaws.com/exmaple_bucket%d", - "AccessKey": "AccessKey", - "SecretKey": "SecretKey", - "Shards": 5 -} +```shell +export AWS_ACCESS_KEY="ACCESSKEY" +export AWS_SECRET_KEY="SECRETKEY" +export AWS_SESSION_TOKEN="SESSIONTOKEN" ``` -- Virtual-hosted-style and temporary access credentials +BucketURL can be configured either as an environment variable or in config.toml. -```json -{ - "EndPoint": "https://example_bucket.s3.us-east-1.amazonaws.com", - "AccessKey": "AccessKey", - "SecretKey": "SecretKey", - "SessionToken": "SessionToken" -} +```shell +export BUCKET_URL="BUCKETURL" ``` -If `Shards` is not set in config.json, the shard is 0, PieceStore won't shard. +If BucketURL is configured as an environment variable, all services will use the same bucket to write and read data. + +If `Shards` is not set in config.toml, it defaults to 0 and PieceStore won't shard. 
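The precedence rule documented above is simple: when BUCKET_URL is exported, it overrides whatever BucketURL was loaded from config.toml; otherwise the file value stands. Below is a minimal, self-contained sketch of that rule, using trimmed-down stand-ins for the real PieceStoreConfig and ObjectStorageConfig types in store/piecestore/storage; the actual override lives in overrideConfigFromEnv in piece_store.go, shown later in this patch.

```go
package main

import (
	"fmt"
	"os"
)

// Trimmed-down stand-ins for the real types in store/piecestore/storage;
// only the fields needed to show the override are kept.
type ObjectStorageConfig struct {
	Storage   string
	BucketURL string
}

type PieceStoreConfig struct {
	Shards int
	Store  *ObjectStorageConfig
}

// overrideBucketURLFromEnv applies the precedence rule from the README:
// an exported BUCKET_URL wins over the BucketURL read from config.toml.
func overrideBucketURLFromEnv(cfg *PieceStoreConfig) {
	if val, ok := os.LookupEnv("BUCKET_URL"); ok {
		cfg.Store.BucketURL = val
	}
}

func main() {
	// Values as they would come out of config.toml.
	cfg := &PieceStoreConfig{
		Shards: 0,
		Store: &ObjectStorageConfig{
			Storage:   "file",
			BucketURL: "./data/primary_payload_data",
		},
	}
	overrideBucketURLFromEnv(cfg)
	fmt.Println("effective BucketURL:", cfg.Store.BucketURL)
}
```

Because the override runs before any piece data is written, exporting BUCKET_URL makes every service read and write the same bucket, which is exactly the caveat the note above calls out.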
> More storage providers will be supported + diff --git a/store/piecestore/piece/piece_store.go b/store/piecestore/piece/piece_store.go index 36f4c5f55..a471a11d1 100644 --- a/store/piecestore/piece/piece_store.go +++ b/store/piecestore/piece/piece_store.go @@ -9,6 +9,7 @@ import ( "path/filepath" "runtime" + "github.com/bnb-chain/greenfield-storage-provider/model" merrors "github.com/bnb-chain/greenfield-storage-provider/model/errors" "github.com/bnb-chain/greenfield-storage-provider/store/piecestore/storage" "github.com/bnb-chain/greenfield-storage-provider/util/log" @@ -16,20 +17,21 @@ import ( // NewPieceStore returns an instance of PieceStore func NewPieceStore(pieceConfig *storage.PieceStoreConfig) (*PieceStore, error) { - cfg := checkConfig(pieceConfig) - blob, err := createStorage(cfg) + checkConfig(pieceConfig) + blob, err := createStorage(pieceConfig) if err != nil { log.Errorw("create storage error", "error", err) return nil, err } - log.Debugf("pieceStore is running", "Storage", cfg.Store.Storage, "BucketURL", - cfg.Store.BucketURL) + log.Debugf("pieceStore is running", "Storage", pieceConfig.Store.Storage, + "shards", pieceConfig.Shards) return &PieceStore{blob}, nil } // checkConfig checks config if right -func checkConfig(cfg *storage.PieceStoreConfig) *storage.PieceStoreConfig { +func checkConfig(cfg *storage.PieceStoreConfig) { + overrideConfigFromEnv(cfg) if cfg.Shards > 256 { log.Panicf("too many shards: %d", cfg.Shards) } @@ -50,7 +52,12 @@ func checkConfig(cfg *storage.PieceStoreConfig) *storage.PieceStoreConfig { cfg.Store.BucketURL = p cfg.Store.BucketURL += "/" } - return cfg +} + +func overrideConfigFromEnv(cfg *storage.PieceStoreConfig) { + if val, ok := os.LookupEnv(model.BucketURL); ok { + cfg.Store.BucketURL = val + } } func createStorage(cfg *storage.PieceStoreConfig) (storage.ObjectStorage, error) { diff --git a/store/piecestore/storage/s3.go b/store/piecestore/storage/s3.go index c06cafafb..a8c2b2820 100644 --- a/store/piecestore/storage/s3.go +++ b/store/piecestore/storage/s3.go @@ -252,13 +252,11 @@ func (sc *SessionCache) newSession(cfg ObjectStorageConfig) (*session.Session, s // in this TestMode, if you want to visit private bucket, you should provide accessKey, secretKey. 
// if TestMode is false, you can use service account or ec2 to visit you s3 straightly if cfg.TestMode { - accessKey := os.Getenv(model.AWSAccessKey) - secretKey := os.Getenv(model.AWSSecretKey) - sessionToken := os.Getenv(model.AWSSessionToken) + key := getAWSSecretKeyFromEnv() if cfg.NoSignRequest { awsConfig.Credentials = credentials.AnonymousCredentials - } else if accessKey != "" && secretKey != "" { - awsConfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken) + } else if key.accessKey != "" && key.secretKey != "" { + awsConfig.Credentials = credentials.NewStaticCredentials(key.accessKey, key.secretKey, key.sessionToken) } } @@ -382,3 +380,23 @@ func getHTTPClient(tlsInsecureSkipVerify bool) *http.Client { Timeout: time.Hour, } } + +type awsSecretKey struct { + accessKey string + secretKey string + sessionToken string +} + +func getAWSSecretKeyFromEnv() *awsSecretKey { + key := &awsSecretKey{} + if val, ok := os.LookupEnv(model.AWSAccessKey); ok { + key.accessKey = val + } + if val, ok := os.LookupEnv(model.AWSSecretKey); ok { + key.secretKey = val + } + if val, ok := os.LookupEnv(model.AWSSessionToken); ok { + key.sessionToken = val + } + return key +} diff --git a/store/store.go b/store/store.go index 71d692bda..6a939dd07 100644 --- a/store/store.go +++ b/store/store.go @@ -11,6 +11,7 @@ import ( "github.com/bnb-chain/greenfield-storage-provider/store/metadb/metalevel" "github.com/bnb-chain/greenfield-storage-provider/store/metadb/metasql" "github.com/bnb-chain/greenfield-storage-provider/store/spdb" + "github.com/bnb-chain/greenfield-storage-provider/util/log" ) // NewMetaDB return a meta-db instance @@ -23,8 +24,11 @@ func NewMetaDB(dbType string, levelDBConfig *config.LevelDBConfig, sqlDBConfig * switch dbType { case model.MySqlDB: // load meta db config from env vars - sqlDBConfig.User = os.Getenv(model.MetaDBUser) - sqlDBConfig.Passwd = os.Getenv(model.MetaDBPassword) + sqlDBConfig.User, sqlDBConfig.Passwd, err = getDBConfigFromEnv(model.MetaDBUser, model.MetaDBPassword) + if err != nil { + log.Error("load meta db config from env failed") + return nil, err + } metaDB, err = metasql.NewMetaDB(sqlDBConfig) case model.LevelDB: metaDB, err = metalevel.NewMetaDB(levelDBConfig) @@ -44,8 +48,11 @@ func NewJobDB(dbType string, sqlDBConfig *config.SqlDBConfig) (spdb.JobDB, error switch dbType { case model.MySqlDB: // load job db config from env vars - sqlDBConfig.User = os.Getenv(model.JobDBUser) - sqlDBConfig.Passwd = os.Getenv(model.JobDBPassword) + sqlDBConfig.User, sqlDBConfig.Passwd, err = getDBConfigFromEnv(model.JobDBUser, model.JobDBPassword) + if err != nil { + log.Error("load job db config from env failed") + return nil, err + } jobDB, err = jobsql.NewJobMetaImpl(sqlDBConfig) case model.MemoryDB: jobDB = jobmemory.NewMemJobDB() @@ -54,3 +61,15 @@ func NewJobDB(dbType string, sqlDBConfig *config.SqlDBConfig) (spdb.JobDB, error } return jobDB, err } + +func getDBConfigFromEnv(user, passwd string) (string, string, error) { + userVal, ok := os.LookupEnv(user) + if !ok { + return "", "", fmt.Errorf("db %s config is not set in environment", user) + } + passwdVal, ok := os.LookupEnv(passwd) + if !ok { + return "", "", fmt.Errorf("db %s config is not set in environment", passwd) + } + return userVal, passwdVal, nil +} diff --git a/test/e2e/onebox/setup_onebox.go b/test/e2e/onebox/setup_onebox.go index ddcfd520b..ce23ab4d8 100644 --- a/test/e2e/onebox/setup_onebox.go +++ b/test/e2e/onebox/setup_onebox.go @@ -49,8 +49,8 @@ func initConfig() { if 
cfg.SyncerCfg.MetaLevelDBConfig == nil { cfg.SyncerCfg.MetaLevelDBConfig = metalevel.DefaultMetaLevelDBConfig } - if cfg.SyncerCfg.PieceConfig == nil { - cfg.SyncerCfg.PieceConfig = storage.DefaultPieceStoreConfig + if cfg.SyncerCfg.PieceStoreConfig == nil { + cfg.SyncerCfg.PieceStoreConfig = storage.DefaultPieceStoreConfig } } @@ -134,7 +134,7 @@ func main() { cfg.SyncerCfg.Address = addr cfg.SyncerCfg.StorageProvider = spDir cfg.SyncerCfg.MetaLevelDBConfig.Path = spDir + "/leveldb" - cfg.SyncerCfg.PieceConfig.Store.BucketURL = spDir + "/piece_store" + cfg.SyncerCfg.PieceStoreConfig.Store.BucketURL = spDir + "/piece_store" if err = util.TomlSettings.NewEncoder(f).Encode(cfg); err != nil { log.Errorw("failed to encode config", "error", err) os.Exit(1)
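The s3.go and store.go hunks above share one pattern: secrets come from the process environment via os.LookupEnv instead of from config files, with two different failure modes. AWS credentials may legitimately be absent (anonymous access or instance-profile credentials), so empty fields are tolerated; database credentials are required, so a missing variable becomes an error. The sketch below is a standalone illustration of both helpers under simplified signatures; META_DB_USER and META_DB_PASSWORD are placeholder names for the demo, since the real constants (model.MetaDBUser, model.MetaDBPassword) are defined outside this patch.

```go
package main

import (
	"fmt"
	"os"
)

// awsCredential mirrors the awsSecretKey struct added in s3.go: a missing
// variable simply leaves the corresponding field empty, and the caller
// decides whether anonymous credentials are acceptable.
type awsCredential struct {
	accessKey    string
	secretKey    string
	sessionToken string
}

func awsCredentialFromEnv() awsCredential {
	c := awsCredential{}
	if v, ok := os.LookupEnv("AWS_ACCESS_KEY"); ok {
		c.accessKey = v
	}
	if v, ok := os.LookupEnv("AWS_SECRET_KEY"); ok {
		c.secretKey = v
	}
	if v, ok := os.LookupEnv("AWS_SESSION_TOKEN"); ok {
		c.sessionToken = v
	}
	return c
}

// dbCredentialFromEnv mirrors getDBConfigFromEnv in store.go: database
// credentials are mandatory, so a missing variable is returned as an error
// instead of silently falling back to an empty string.
func dbCredentialFromEnv(userEnv, passwdEnv string) (string, string, error) {
	user, ok := os.LookupEnv(userEnv)
	if !ok {
		return "", "", fmt.Errorf("db %s config is not set in environment", userEnv)
	}
	passwd, ok := os.LookupEnv(passwdEnv)
	if !ok {
		return "", "", fmt.Errorf("db %s config is not set in environment", passwdEnv)
	}
	return user, passwd, nil
}

func main() {
	aws := awsCredentialFromEnv()
	fmt.Println("aws access key set:", aws.accessKey != "")

	// Placeholder variable names for the demo only.
	if _, _, err := dbCredentialFromEnv("META_DB_USER", "META_DB_PASSWORD"); err != nil {
		fmt.Println("expected when the variables are unset:", err)
	}
}
```

Keeping both helpers small and side-effect free makes the precedence explicit and keeps secrets out of config.toml, which is the "for safety" point the README change is making.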