fix: use env var to get bucket url (#130)
* fix: use env var to get bucket url

* fix: improve loading config from env function

* fix: rename variable names

* fix: change piece store readme.md

---------

Co-authored-by: DylanYong <dylan.y@nodereal.io>
sysvm and yzhaoyu authored Feb 21, 2023
1 parent 034ea61 commit 8000741
Showing 13 changed files with 89 additions and 51 deletions.
2 changes: 1 addition & 1 deletion Dockerfile
@@ -26,7 +26,7 @@ ARG USER=sp
ARG USER_UID=1000
ARG USER_GID=1000

ENV PACKAGES libstdc++ ca-certificates bash curl tini
ENV PACKAGES libstdc++ ca-certificates bash curl
ENV WORKDIR=/app

RUN apk add --no-cache $PACKAGES \
8 changes: 4 additions & 4 deletions config/config.toml
@@ -59,9 +59,9 @@ Service = [
StoneHubServiceAddress = "127.0.0.1:9333"
SyncerServiceAddress = ["127.0.0.1:9543", "127.0.0.1:9553", "127.0.0.1:9563", "127.0.0.1:9573", "127.0.0.1:9583", "127.0.0.1:9593"]
StoneJobLimit = 64
[StoneNodeCfg.PieceConfig]
[StoneNodeCfg.PieceStoreConfig]
Shards = 0
[StoneNodeCfg.PieceConfig.Store]
[StoneNodeCfg.PieceStoreConfig.Store]
Storage = "file"
BucketURL = "./data/primary_payload_data"
MaxRetries = 5
@@ -71,7 +71,7 @@ Service = [
StorageProvider = "gnfd-test-sp"
Address = "127.0.0.1:9533"
MetaDBType = "leveldb"
[SyncerCfg.PieceConfig.Store]
[SyncerCfg.PieceStoreConfig.Store]
Storage = "file"
BucketURL = "./data/secondary_payload_data"
MaxRetries = 5
@@ -98,7 +98,7 @@ Service = [

[ChallengeCfg]
StorageProvider = "gnfd-test-sp"
Address = "127.0.0.1:9633"
Address = "127.0.0.1:9733"
MetaDBType = "leveldb"
[ChallengeCfg.PieceStoreConfig]
Shards = 0
4 changes: 0 additions & 4 deletions docker-entrypoint.sh

This file was deleted.

3 changes: 3 additions & 0 deletions model/const.go
@@ -97,6 +97,9 @@ const (

// environment constants
const (
// Piece Store constants
BucketURL = "BUCKET_URL"

// AWS environment constants
AWSAccessKey = "AWS_ACCESS_KEY"
AWSSecretKey = "AWS_SECRET_KEY"
2 changes: 1 addition & 1 deletion service/stonenode/stone_node.go
@@ -48,7 +48,7 @@ func (node *StoneNodeService) initClient() error {
if node.running.Load() {
return merrors.ErrStoneNodeStarted
}
store, err := client.NewStoreClient(node.cfg.PieceConfig)
store, err := client.NewStoreClient(node.cfg.PieceStoreConfig)
if err != nil {
log.Errorw("stone node inits piece store client failed", "error", err)
return err
4 changes: 2 additions & 2 deletions service/stonenode/stone_node_config.go
@@ -7,7 +7,7 @@ type StoneNodeConfig struct {
StoneHubServiceAddress string
SyncerServiceAddress []string
StorageProvider string
PieceConfig *storage.PieceStoreConfig
PieceStoreConfig *storage.PieceStoreConfig
StoneJobLimit int64
}

@@ -16,6 +16,6 @@ var DefaultStoneNodeConfig = &StoneNodeConfig{
StoneHubServiceAddress: "127.0.0.1:9333",
SyncerServiceAddress: []string{"127.0.0.1:9533", "127.0.0.1:9543", "127.0.0.1:9553", "127.0.0.1:9563", "127.0.0.1:9573", "127.0.0.1:9583"},
StorageProvider: "bnb-sp",
PieceConfig: storage.DefaultPieceStoreConfig,
PieceStoreConfig: storage.DefaultPieceStoreConfig,
StoneJobLimit: 64,
}
2 changes: 1 addition & 1 deletion service/syncer/syncer.go
@@ -45,7 +45,7 @@ func NewSyncerService(config *SyncerConfig) (*Syncer, error) {

// initClient
func (s *Syncer) initClient() error {
store, err := client.NewStoreClient(s.config.PieceConfig)
store, err := client.NewStoreClient(s.config.PieceStoreConfig)
if err != nil {
log.Errorw("syncer starts piece store client failed", "error", err)
return err
4 changes: 2 additions & 2 deletions service/syncer/syncer_config.go
@@ -14,7 +14,7 @@ type SyncerConfig struct {
MetaDBType string
MetaLevelDBConfig *config.LevelDBConfig
MetaSqlDBConfig *config.SqlDBConfig
PieceConfig *storage.PieceStoreConfig
PieceStoreConfig *storage.PieceStoreConfig
}

var DefaultSyncerConfig = &SyncerConfig{
@@ -23,5 +23,5 @@ var DefaultSyncerConfig = &SyncerConfig{
MetaDBType: model.LevelDB,
MetaLevelDBConfig: metalevel.DefaultMetaLevelDBConfig,
MetaSqlDBConfig: metasql.DefaultMetaSqlDBConfig,
PieceConfig: storage.DefaultPieceStoreConfig,
PieceStoreConfig: storage.DefaultPieceStoreConfig,
}
31 changes: 13 additions & 18 deletions store/piecestore/README.md
@@ -45,30 +45,25 @@ Amazon S3 can refer this [link](https://docs.aws.amazon.com/IAM/latest/UserGuide

The number of shards in object storage that supports multi-bucket storage.

### Config example
## Config Note

- Path-style and AccessKey, SecretKey
For safety, the access key, secret key and session token should be configured as environment variables:

```json
{
"EndPoint": "https://s3.us-east-1.amazonaws.com/exmaple_bucket%d",
"AccessKey": "AccessKey",
"SecretKey": "SecretKey",
"Shards": 5
}
```shell
export AWS_ACCESS_KEY="ACCESSKEY"
export AWS_SECRET_KEY="SECRETKEY"
export AWS_SESSION_TOKEN="SESSIONTOKEN"
```

- Virtual-hosted-style and temporary access credentials
BucketURL can be configured either in the environment or in config.toml.

```json
{
"EndPoint": "https://example_bucket.s3.us-east-1.amazonaws.com",
"AccessKey": "AccessKey",
"SecretKey": "SecretKey",
"SessionToken": "SessionToken"
}
```shell
export BUCKET_URL="BUCKETURL"
```
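
For example, with an S3 backend the value is the bucket endpoint (the URL below is illustrative; it mirrors the virtual-hosted-style example removed from the old README):

```shell
export BUCKET_URL="https://example_bucket.s3.us-east-1.amazonaws.com"
```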

If `Shards` is not set in config.json, the shard is 0, PieceStore won't shard.
If BucketURL is configured in the environment, all services will use the same bucket to write and read data.

If `Shards` is not set in config.toml, it defaults to 0 and PieceStore won't shard.
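
For reference, the file-backend section from the example config.toml earlier in this commit looks like this; setting `BUCKET_URL` in the environment takes precedence over the `BucketURL` value here:

```toml
[StoneNodeCfg.PieceStoreConfig]
Shards = 0
[StoneNodeCfg.PieceStoreConfig.Store]
Storage = "file"
BucketURL = "./data/primary_payload_data"
MaxRetries = 5
```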

> More storage providers will be supported
19 changes: 13 additions & 6 deletions store/piecestore/piece/piece_store.go
@@ -9,27 +9,29 @@ import (
"path/filepath"
"runtime"

"github.com/bnb-chain/greenfield-storage-provider/model"
merrors "github.com/bnb-chain/greenfield-storage-provider/model/errors"
"github.com/bnb-chain/greenfield-storage-provider/store/piecestore/storage"
"github.com/bnb-chain/greenfield-storage-provider/util/log"
)

// NewPieceStore returns an instance of PieceStore
func NewPieceStore(pieceConfig *storage.PieceStoreConfig) (*PieceStore, error) {
cfg := checkConfig(pieceConfig)
blob, err := createStorage(cfg)
checkConfig(pieceConfig)
blob, err := createStorage(pieceConfig)
if err != nil {
log.Errorw("create storage error", "error", err)
return nil, err
}
log.Debugf("pieceStore is running", "Storage", cfg.Store.Storage, "BucketURL",
cfg.Store.BucketURL)
log.Debugf("pieceStore is running", "Storage", pieceConfig.Store.Storage,
"shards", pieceConfig.Shards)

return &PieceStore{blob}, nil
}

// checkConfig checks config if right
func checkConfig(cfg *storage.PieceStoreConfig) *storage.PieceStoreConfig {
func checkConfig(cfg *storage.PieceStoreConfig) {
overrideConfigFromEnv(cfg)
if cfg.Shards > 256 {
log.Panicf("too many shards: %d", cfg.Shards)
}
@@ -50,7 +52,12 @@ func checkConfig(cfg *storage.PieceStoreConfig) *storage.PieceStoreConfig {
cfg.Store.BucketURL = p
cfg.Store.BucketURL += "/"
}
return cfg
}

func overrideConfigFromEnv(cfg *storage.PieceStoreConfig) {
if val, ok := os.LookupEnv(model.BucketURL); ok {
cfg.Store.BucketURL = val
}
}

func createStorage(cfg *storage.PieceStoreConfig) (storage.ObjectStorage, error) {
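
The override itself is just an environment lookup before the existing validation runs. A self-contained sketch of the pattern (simplified stand-in types, not the project's actual structs):

```go
package main

import (
	"fmt"
	"os"
)

// storeConfig is a simplified stand-in for the piece store's Store config.
type storeConfig struct {
	BucketURL string
}

// overrideFromEnv replaces the configured bucket URL when BUCKET_URL is set,
// mirroring the override introduced in this commit.
func overrideFromEnv(cfg *storeConfig) {
	if val, ok := os.LookupEnv("BUCKET_URL"); ok {
		cfg.BucketURL = val
	}
}

func main() {
	cfg := &storeConfig{BucketURL: "./data/primary_payload_data"}
	os.Setenv("BUCKET_URL", "https://example_bucket.s3.us-east-1.amazonaws.com")
	overrideFromEnv(cfg)
	fmt.Println(cfg.BucketURL) // the env value wins over the config file default
}
```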
28 changes: 23 additions & 5 deletions store/piecestore/storage/s3.go
@@ -252,13 +252,11 @@ func (sc *SessionCache) newSession(cfg ObjectStorageConfig) (*session.Session, s
// in this TestMode, if you want to visit private bucket, you should provide accessKey, secretKey.
// if TestMode is false, you can use service account or ec2 to visit you s3 straightly
if cfg.TestMode {
accessKey := os.Getenv(model.AWSAccessKey)
secretKey := os.Getenv(model.AWSSecretKey)
sessionToken := os.Getenv(model.AWSSessionToken)
key := getAWSSecretKeyFromEnv()
if cfg.NoSignRequest {
awsConfig.Credentials = credentials.AnonymousCredentials
} else if accessKey != "" && secretKey != "" {
awsConfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken)
} else if key.accessKey != "" && key.secretKey != "" {
awsConfig.Credentials = credentials.NewStaticCredentials(key.accessKey, key.secretKey, key.sessionToken)
}
}

@@ -382,3 +380,23 @@ func getHTTPClient(tlsInsecureSkipVerify bool) *http.Client {
Timeout: time.Hour,
}
}

type awsSecretKey struct {
accessKey string
secretKey string
sessionToken string
}

func getAWSSecretKeyFromEnv() *awsSecretKey {
key := &awsSecretKey{}
if val, ok := os.LookupEnv(model.AWSAccessKey); ok {
key.accessKey = val
}
if val, ok := os.LookupEnv(model.AWSSecretKey); ok {
key.secretKey = val
}
if val, ok := os.LookupEnv(model.AWSSessionToken); ok {
key.sessionToken = val
}
return key
}
27 changes: 23 additions & 4 deletions store/store.go
@@ -11,6 +11,7 @@ import (
"github.com/bnb-chain/greenfield-storage-provider/store/metadb/metalevel"
"github.com/bnb-chain/greenfield-storage-provider/store/metadb/metasql"
"github.com/bnb-chain/greenfield-storage-provider/store/spdb"
"github.com/bnb-chain/greenfield-storage-provider/util/log"
)

// NewMetaDB return a meta-db instance
@@ -23,8 +24,11 @@ func NewMetaDB(dbType string, levelDBConfig *config.LevelDBConfig, sqlDBConfig *
switch dbType {
case model.MySqlDB:
// load meta db config from env vars
sqlDBConfig.User = os.Getenv(model.MetaDBUser)
sqlDBConfig.Passwd = os.Getenv(model.MetaDBPassword)
sqlDBConfig.User, sqlDBConfig.Passwd, err = getDBConfigFromEnv(model.MetaDBUser, model.MetaDBPassword)
if err != nil {
log.Error("load meta db config from env failed")
return nil, err
}
metaDB, err = metasql.NewMetaDB(sqlDBConfig)
case model.LevelDB:
metaDB, err = metalevel.NewMetaDB(levelDBConfig)
@@ -44,8 +48,11 @@ func NewJobDB(dbType string, sqlDBConfig *config.SqlDBConfig) (spdb.JobDB, error
switch dbType {
case model.MySqlDB:
// load job db config from env vars
sqlDBConfig.User = os.Getenv(model.JobDBUser)
sqlDBConfig.Passwd = os.Getenv(model.JobDBPassword)
sqlDBConfig.User, sqlDBConfig.Passwd, err = getDBConfigFromEnv(model.JobDBUser, model.JobDBPassword)
if err != nil {
log.Error("load job db config from env failed")
return nil, err
}
jobDB, err = jobsql.NewJobMetaImpl(sqlDBConfig)
case model.MemoryDB:
jobDB = jobmemory.NewMemJobDB()
Expand All @@ -54,3 +61,15 @@ func NewJobDB(dbType string, sqlDBConfig *config.SqlDBConfig) (spdb.JobDB, error
}
return jobDB, err
}

func getDBConfigFromEnv(user, passwd string) (string, string, error) {
userVal, ok := os.LookupEnv(user)
if !ok {
return "", "", fmt.Errorf("db %s config is not set in environment", user)
}
passwdVal, ok := os.LookupEnv(passwd)
if !ok {
return "", "", fmt.Errorf("db %s config is not set in environment", passwd)
}
return userVal, passwdVal, nil
}
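
Unlike the old os.Getenv calls, the new helper fails fast when either variable is missing instead of silently using an empty credential. A minimal sketch of the same look-up-both-or-error pattern (the environment variable names below are placeholders, not the project's actual constants):

```go
package main

import (
	"fmt"
	"os"
)

// requireEnvPair returns the values of two required environment variables,
// returning an error if either is unset.
func requireEnvPair(userKey, passwdKey string) (string, string, error) {
	user, ok := os.LookupEnv(userKey)
	if !ok {
		return "", "", fmt.Errorf("%s is not set in environment", userKey)
	}
	passwd, ok := os.LookupEnv(passwdKey)
	if !ok {
		return "", "", fmt.Errorf("%s is not set in environment", passwdKey)
	}
	return user, passwd, nil
}

func main() {
	// Placeholder variable names for illustration only.
	os.Setenv("EXAMPLE_DB_USER", "sp")
	user, passwd, err := requireEnvPair("EXAMPLE_DB_USER", "EXAMPLE_DB_PASSWORD")
	fmt.Println(user, passwd, err) // err is non-nil because EXAMPLE_DB_PASSWORD is unset
}
```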
6 changes: 3 additions & 3 deletions test/e2e/onebox/setup_onebox.go
@@ -49,8 +49,8 @@ func initConfig() {
if cfg.SyncerCfg.MetaLevelDBConfig == nil {
cfg.SyncerCfg.MetaLevelDBConfig = metalevel.DefaultMetaLevelDBConfig
}
if cfg.SyncerCfg.PieceConfig == nil {
cfg.SyncerCfg.PieceConfig = storage.DefaultPieceStoreConfig
if cfg.SyncerCfg.PieceStoreConfig == nil {
cfg.SyncerCfg.PieceStoreConfig = storage.DefaultPieceStoreConfig
}
}

@@ -134,7 +134,7 @@ func main() {
cfg.SyncerCfg.Address = addr
cfg.SyncerCfg.StorageProvider = spDir
cfg.SyncerCfg.MetaLevelDBConfig.Path = spDir + "/leveldb"
cfg.SyncerCfg.PieceConfig.Store.BucketURL = spDir + "/piece_store"
cfg.SyncerCfg.PieceStoreConfig.Store.BucketURL = spDir + "/piece_store"
if err = util.TomlSettings.NewEncoder(f).Encode(cfg); err != nil {
log.Errorw("failed to encode config", "error", err)
os.Exit(1)
