diff --git a/Dockerfile b/Dockerfile
index ed845c05d..29e9469e5 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,7 +4,7 @@ RUN apk add --no-cache make git bash protoc
 
 ADD . /greenfield-storage-provider
 
-ENV CGO_ENABLED=0
+ENV CGO_ENABLED=1
 ENV GO111MODULE=on
 
 # For Private REPO
@@ -12,6 +12,8 @@ ARG GH_TOKEN=""
 RUN go env -w GOPRIVATE="github.com/bnb-chain/*"
 RUN git config --global url."https://${GH_TOKEN}@github.com".insteadOf "https://github.com"
 
+RUN apk add build-base libc-dev
+
 RUN cd /greenfield-storage-provider \
     && make install-tools \
     && make buf-gen \
@@ -42,6 +44,6 @@ COPY --from=builder /greenfield-storage-provider/build/* ${WORKDIR}/
 RUN chown -R ${USER_UID}:${USER_GID} ${WORKDIR}
 USER ${USER_UID}:${USER_GID}
 
-EXPOSE 9033 9133 9233 9333 9433 9533
+EXPOSE 9033
 
 ENTRYPOINT ["/app/storage_provider"]
diff --git a/README.md b/README.md
index a29e1ae3e..5e07b4a7a 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ bash build.sh
 cd build
 # print version
 ./gnfd-sp --version
-# setup secondary sps in the test-env directory(syncer)
+# setup secondary sps in the test-env directory(syncer); note: run this only once, the first time
 ./setup-test-env
 # run primary sp(gateway/uploader/downloader/stonehub/stonenode/syncer)
 ./gnfd-sp -config ./config.toml
diff --git a/config/config.toml b/config/config.toml
index 0b7eaeccc..2948871e9 100644
--- a/config/config.toml
+++ b/config/config.toml
@@ -54,7 +54,7 @@ Service = [
   StorageProvider = "gnfd-test-sp"
   Address = "127.0.0.1:9433"
   StoneHubServiceAddress = "127.0.0.1:9333"
-  SyncerServiceAddress = ["127.0.0.1:9593", "127.0.0.1:9543", "127.0.0.1:9553", "127.0.0.1:9563", "127.0.0.1:9573", "127.0.0.1:9583"]
+  SyncerServiceAddress = ["127.0.0.1:9543", "127.0.0.1:9553", "127.0.0.1:9563", "127.0.0.1:9573", "127.0.0.1:9583", "127.0.0.1:9593"]
   StoneJobLimit = 64
 [StoneNodeCfg.PieceConfig]
   Shards = 0
diff --git a/model/const.go b/model/const.go
index ff75ae14d..bce8e3d9c 100644
--- a/model/const.go
+++ b/model/const.go
@@ -87,3 +87,19 @@ const (
 	MySqlDB string = "mysql"
 	LevelDB string = "leveldb"
 )
+
+// environment constants
+const (
+	// AWS environment constants
+	AWSAccessKey    = "AWS_ACCESS_KEY"
+	AWSSecretKey    = "AWS_SECRET_KEY"
+	AWSSessionToken = "AWS_SESSION_TOKEN"
+
+	// MetaDB environment constants
+	MetaDBUser     = "META_DB_USER"
+	MetaDBPassword = "META_DB_PASSWORD"
+
+	// JobDB environment constants
+	JobDBUser     = "JOB_DB_USER"
+	JobDBPassword = "JOB_DB_PASSWORD"
+)
diff --git a/store/db_util.go b/store/db_util.go
index 362e343f9..c7d8423ce 100644
--- a/store/db_util.go
+++ b/store/db_util.go
@@ -2,6 +2,7 @@ package store
 
 import (
 	"fmt"
+	"os"
 
 	"github.com/bnb-chain/greenfield-storage-provider/model"
 	"github.com/bnb-chain/greenfield-storage-provider/store/config"
@@ -22,6 +23,9 @@ func NewMetaDB(dbType string, levelDBConfig *config.LevelDBConfig, sqlDBConfig *
 
 	switch dbType {
 	case model.MySqlDB:
+		// load meta db config from env vars
+		sqlDBConfig.User = os.Getenv(model.MetaDBUser)
+		sqlDBConfig.Passwd = os.Getenv(model.MetaDBPassword)
 		metaDB, err = metasql.NewMetaDB(sqlDBConfig)
 	case model.LevelDB:
 		metaDB, err = metalevel.NewMetaDB(levelDBConfig)
@@ -40,6 +44,9 @@ func NewJobDB(dbType string, sqlDBConfig *config.SqlDBConfig) (jobdb.JobDBV2, er
 
 	switch dbType {
 	case model.MySqlDB:
+		// load job db config from env vars
+		sqlDBConfig.User = os.Getenv(model.JobDBUser)
+		sqlDBConfig.Passwd = os.Getenv(model.JobDBPassword)
 		jobDB, err = jobsql.NewJobMetaImpl(sqlDBConfig)
 	case model.MemoryDB:
 		jobDB = jobmemory.NewMemJobDBV2()
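Note on the db_util.go hunk above: the MySQL credentials are now read unconditionally from the environment, so an unset variable silently becomes an empty user or password. For reference, a minimal standalone sketch of the same lookup with a fail-fast check; it is not part of this patch, and the mustGetenv helper is hypothetical:

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// mustGetenv is a hypothetical helper: it mirrors the os.Getenv calls added in
// store/db_util.go but fails fast when a required credential is not exported.
func mustGetenv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		log.Fatalf("required environment variable %s is not set", key)
	}
	return v
}

func main() {
	// Variable names match the constants added in model/const.go.
	user := mustGetenv("META_DB_USER")
	passwd := mustGetenv("META_DB_PASSWORD")
	fmt.Printf("meta db user=%s, password length=%d\n", user, len(passwd))
}
```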
diff --git a/store/piecestore/storage/s3.go b/store/piecestore/storage/s3.go
index 1fe6f4bd1..c06cafafb 100644
--- a/store/piecestore/storage/s3.go
+++ b/store/piecestore/storage/s3.go
@@ -229,10 +229,6 @@ func (sc *SessionCache) newSession(cfg ObjectStorageConfig) (*session.Session, s
 	sc.Lock()
 	defer sc.Unlock()
 
-	if sess, ok := sc.sessions[cfg]; ok {
-		return sess, "", nil
-	}
-
 	endpoint, bucketName, region, err := parseEndPoint(cfg.BucketURL)
 	if err != nil {
 		log.Errorw("s3 parseEndPoint error", "error", err)
@@ -240,6 +236,10 @@ func (sc *SessionCache) newSession(cfg ObjectStorageConfig) (*session.Session, s
 	}
 	log.Debugw("s3 storage info", "endPoint", endpoint, "bucketName", bucketName, "region", region)
 
+	if sess, ok := sc.sessions[cfg]; ok {
+		return sess, bucketName, nil
+	}
+
 	awsConfig := &aws.Config{
 		Region:           aws.String(region),
 		Endpoint:         aws.String(endpoint),
@@ -248,11 +248,17 @@ func (sc *SessionCache) newSession(cfg ObjectStorageConfig) (*session.Session, s
 		S3ForcePathStyle: aws.Bool(!isVirtualHostStyle),
 		Retryer:          newCustomS3Retryer(cfg.MaxRetries, time.Duration(cfg.MinRetryDelay)),
 	}
-	if !cfg.TestMode {
+	// if TestMode is true, you can access either a private or a public bucket; to access a
+	// private bucket in this mode, provide accessKey and secretKey via environment variables.
+	// if TestMode is false, you can access s3 directly through a service account or an ec2 role.
+	if cfg.TestMode {
+		accessKey := os.Getenv(model.AWSAccessKey)
+		secretKey := os.Getenv(model.AWSSecretKey)
+		sessionToken := os.Getenv(model.AWSSessionToken)
 		if cfg.NoSignRequest {
 			awsConfig.Credentials = credentials.AnonymousCredentials
-		} else if cfg.AccessKey != "" && cfg.SecretKey != "" {
-			awsConfig.Credentials = credentials.NewStaticCredentials(cfg.AccessKey, cfg.SecretKey, cfg.SessionToken)
+		} else if accessKey != "" && secretKey != "" {
+			awsConfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken)
 		}
 	}
 
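For context on the newSession change above, a self-contained sketch of the TestMode credential flow it implements: static credentials come from the environment instead of config fields, and the default AWS credential chain (service account, EC2 role) applies otherwise. This is an illustration only; the region value is a placeholder and the env var names are the ones defined in model/const.go:

```go
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Env var names match the constants added in model/const.go.
	accessKey := os.Getenv("AWS_ACCESS_KEY")
	secretKey := os.Getenv("AWS_SECRET_KEY")
	sessionToken := os.Getenv("AWS_SESSION_TOKEN")

	// Placeholder region; the real code derives endpoint, bucket and region from the bucket URL.
	awsConfig := &aws.Config{Region: aws.String("ap-northeast-1")}

	noSignRequest := false // mirrors cfg.NoSignRequest
	if noSignRequest {
		awsConfig.Credentials = credentials.AnonymousCredentials
	} else if accessKey != "" && secretKey != "" {
		awsConfig.Credentials = credentials.NewStaticCredentials(accessKey, secretKey, sessionToken)
	}
	// If neither branch sets credentials, the default chain (service account,
	// EC2 instance role, shared config) is used when the session is created.

	sess, err := session.NewSession(awsConfig)
	if err != nil {
		fmt.Println("failed to create aws session:", err)
		return
	}
	fmt.Println("aws session created:", sess != nil)
}
```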
diff --git a/store/piecestore/storage/storage_config.go b/store/piecestore/storage/storage_config.go
index 43b2efb8e..73e6d6f24 100644
--- a/store/piecestore/storage/storage_config.go
+++ b/store/piecestore/storage/storage_config.go
@@ -12,9 +12,6 @@ var DefaultPieceStoreConfig = &PieceStoreConfig{
 	Store: &ObjectStorageConfig{
 		Storage:       "s3",
 		BucketURL:     "https://s3.ap-northeast-1.amazonaws.com/example",
-		AccessKey:     "",
-		SecretKey:     "",
-		SessionToken:  "",
 		NoSignRequest: false,
 		MaxRetries:    5,
 		MinRetryDelay: 0,
@@ -26,12 +23,9 @@ var DefaultPieceStoreConfig = &PieceStoreConfig{
 type ObjectStorageConfig struct {
 	Storage               string // backend storage type (e.g. s3, file, memory)
 	BucketURL             string // the bucket URL of object storage to store data
-	AccessKey             string // access key for object storage
-	SecretKey             string // secret key for object storage
-	SessionToken          string // temporary credential used to access backend storage
 	NoSignRequest         bool   // whether access public bucket
 	MaxRetries            int    // the number of max retries that will be performed
 	MinRetryDelay         int64  // the minimum retry delay after which retry will be performed
 	TlsInsecureSkipVerify bool   // whether skip the certificate verification of HTTPS requests
-	TestMode              bool   // if test mode is true, don't need s3 credentials
+	TestMode              bool   // if test mode is true, s3 credentials must be provided via environment variables
 }
diff --git a/test/e2e/piecestore/helper_test.go b/test/e2e/piecestore/helper_test.go
index abd2ab6a2..288887a11 100644
--- a/test/e2e/piecestore/helper_test.go
+++ b/test/e2e/piecestore/helper_test.go
@@ -22,14 +22,11 @@ func setUp(t *testing.T, storageType, bucketURL string) (*piece.PieceStore, erro
 		Store: &storage.ObjectStorageConfig{
 			Storage:       storageType,
 			BucketURL:     bucketURL,
-			AccessKey:     "",
-			SecretKey:     "",
-			SessionToken:  "",
 			NoSignRequest: false,
 			MaxRetries:    5,
 			MinRetryDelay: 0,
 			TlsInsecureSkipVerify: false,
-			TestMode:      false,
+			TestMode:      true,
 		},
 	})
 }
diff --git a/test/e2e/services/case_driver.go b/test/e2e/services/case_driver.go
index 44a9f3f61..654b2e830 100644
--- a/test/e2e/services/case_driver.go
+++ b/test/e2e/services/case_driver.go
@@ -8,13 +8,13 @@ import (
 	"net/http"
 	"strings"
 	"time"
-
-	"github.com/bnb-chain/greenfield-sdk-go/pkg/signer"
-	"github.com/cosmos/cosmos-sdk/testutil/testdata"
-
+
 	"github.com/bnb-chain/greenfield-storage-provider/config"
 	"github.com/bnb-chain/greenfield-storage-provider/model"
 	"github.com/bnb-chain/greenfield-storage-provider/util/log"
+
+	"github.com/bnb-chain/greenfield-sdk-go/pkg/signer"
+	"github.com/cosmos/cosmos-sdk/testutil/testdata"
 )
 
 var (
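Since helper_test.go now enables TestMode, the e2e suite has to receive its S3 credentials through the environment rather than through config fields. A hypothetical wiring example follows; the key values and bucket URL are placeholders, and only field names shown in the diff above are used:

```go
package main

import (
	"fmt"
	"os"

	"github.com/bnb-chain/greenfield-storage-provider/store/piecestore/storage"
)

func main() {
	// Placeholder credentials, exported the way an e2e environment would export
	// them; with TestMode=true the piece store reads AWS_ACCESS_KEY / AWS_SECRET_KEY
	// from the environment instead of from config fields.
	os.Setenv("AWS_ACCESS_KEY", "example-access-key")
	os.Setenv("AWS_SECRET_KEY", "example-secret-key")

	cfg := &storage.ObjectStorageConfig{
		Storage:               "s3",
		BucketURL:             "https://s3.ap-northeast-1.amazonaws.com/example",
		NoSignRequest:         false,
		MaxRetries:            5,
		MinRetryDelay:         0,
		TlsInsecureSkipVerify: false,
		TestMode:              true,
	}
	fmt.Printf("object storage config for the e2e run: %+v\n", cfg)
}
```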