diff --git a/deployment/localup/localup.sh b/deployment/localup/localup.sh
index 88dc13939..4943b18ba 100644
--- a/deployment/localup/localup.sh
+++ b/deployment/localup/localup.sh
@@ -139,7 +139,7 @@ make_config() {
     sed -i -e "s/PProfHttpAddress = '.*'/PProfHttpAddress = '${pprof_address}'/g" config.toml
 
     # blocksyncer
-    sed -i -e "s/Modules = \[\]/Modules = \[\'epoch\',\'bucket\',\'object\',\'payment\',\'group\',\'permission\',\'storage_provider\'\]/g" config.toml
+    sed -i -e "s/Modules = \[\]/Modules = \[\'epoch\',\'bucket\',\'object\',\'payment\',\'group\',\'permission\',\'storage_provider\',\'prefix_tree\'\]/g" config.toml
     sed -i -e "s/RecreateTables = false/RecreateTables = true/g" config.toml
     WORKERS=50
     sed -i -e "s/Workers = 0/Workers = ${WORKERS}/g" config.toml
diff --git a/modular/blocksyncer/blocksyncer_options.go b/modular/blocksyncer/blocksyncer_options.go
index 97d9e53a2..9335d7217 100644
--- a/modular/blocksyncer/blocksyncer_options.go
+++ b/modular/blocksyncer/blocksyncer_options.go
@@ -17,7 +17,6 @@ import (
     "github.com/forbole/juno/v4/models"
     "github.com/forbole/juno/v4/modules"
     "github.com/forbole/juno/v4/modules/messages"
-    "github.com/forbole/juno/v4/modules/registrar"
     "github.com/forbole/juno/v4/node/remote"
     "github.com/forbole/juno/v4/parser"
     parserconfig "github.com/forbole/juno/v4/parser/config"
@@ -30,6 +29,8 @@ import (
     coremodule "github.com/bnb-chain/greenfield-storage-provider/core/module"
     "github.com/bnb-chain/greenfield-storage-provider/model"
     "github.com/bnb-chain/greenfield-storage-provider/model/errors"
+    db "github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/database"
+    registrar "github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/modules"
     "github.com/bnb-chain/greenfield-storage-provider/pkg/log"
 )
 
@@ -77,9 +78,9 @@ func (s *BlockSyncerModular) initClient() error {
     // JunoConfig the runner
     junoConfig := cmd.NewConfig("juno").
         WithParseConfig(parsecmdtypes.NewConfig().
-            WithRegistrar(registrar.NewDefaultRegistrar(
+            WithRegistrar(registrar.NewBlockSyncerRegistrar(
                 messages.CosmosMessageAddressesParser,
-            )).WithFileType("toml"),
+            )).WithDBBuilder(db.BlockSyncerDBBuilder).WithFileType("toml"),
     )
     cmdCfg := junoConfig.GetParseConfig()
     cmdCfg.WithTomlConfig(s.config)
diff --git a/modular/blocksyncer/database/database.go b/modular/blocksyncer/database/database.go
new file mode 100644
index 000000000..261dd57ac
--- /dev/null
+++ b/modular/blocksyncer/database/database.go
@@ -0,0 +1,50 @@
+package database
+
+import (
+    "database/sql"
+    "errors"
+    "fmt"
+
+    "github.com/forbole/juno/v4/database"
+    "github.com/forbole/juno/v4/database/mysql"
+    "github.com/forbole/juno/v4/database/sqlclient"
+    "gorm.io/gorm"
+)
+
+var _ database.Database = &DB{}
+
+// DB represents a SQL database with expanded features
+// so that it can properly store custom block syncer related data.
+type DB struct {
+    *mysql.Database
+}
+
+// BlockSyncerDBBuilder allows to create a new DB instance implementing the db.Builder type
+func BlockSyncerDBBuilder(ctx *database.Context) (database.Database, error) {
+    db, err := sqlclient.New(&ctx.Cfg)
+    if err != nil {
+        return nil, err
+    }
+    return &DB{
+        Database: &mysql.Database{
+            Impl: database.Impl{
+                Db:             db,
+                EncodingConfig: ctx.EncodingConfig,
+            },
+        },
+    }, nil
+}
+
+// Cast allows to cast the given db to a DB instance
+func Cast(db database.Database) *DB {
+    bdDatabase, ok := db.(*DB)
+    if !ok {
+        panic(fmt.Errorf("given database instance is not a DB"))
+    }
+    return bdDatabase
+}
+
+// errIsNotFound checks whether the given error is a not-found error
+func errIsNotFound(err error) bool {
+    return errors.Is(err, sql.ErrNoRows) || errors.Is(err, gorm.ErrRecordNotFound)
+}
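
BlockSyncerDBBuilder hands juno a generic database.Database, and a module that needs the extended helpers recovers the concrete type with Cast. A minimal sketch of that pattern, using a hypothetical example module (the package and names below are illustrative, not part of this change):

```go
package examplemodule

import (
	junodb "github.com/forbole/juno/v4/database"

	"github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/database"
)

// Module is a hypothetical blocksyncer module that keeps a handle to the extended DB.
type Module struct {
	db *database.DB
}

// NewModule recovers the concrete *database.DB from the generic juno database.
func NewModule(d junodb.Database) *Module {
	// Cast panics if d was not built by database.BlockSyncerDBBuilder.
	return &Module{db: database.Cast(d)}
}
```
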
diff --git a/modular/blocksyncer/database/masterdb.go b/modular/blocksyncer/database/masterdb.go
new file mode 100644
index 000000000..6dbfd6973
--- /dev/null
+++ b/modular/blocksyncer/database/masterdb.go
@@ -0,0 +1,28 @@
+package database
+
+import (
+    "context"
+
+    "github.com/forbole/juno/v4/models"
+    "gorm.io/gorm/clause"
+)
+
+// GetMasterDB gets the master db info
+func (db *DB) GetMasterDB(ctx context.Context) (*models.MasterDB, error) {
+    var masterDB models.MasterDB
+
+    err := db.Db.Find(&masterDB).Error
+    if err != nil && !errIsNotFound(err) {
+        return nil, err
+    }
+    return &masterDB, nil
+}
+
+// SetMasterDB sets the master db
+func (db *DB) SetMasterDB(ctx context.Context, masterDB *models.MasterDB) error {
+    err := db.Db.Table((&models.MasterDB{}).TableName()).Clauses(clause.OnConflict{
+        Columns:   []clause.Column{{Name: "one_row_id"}},
+        DoUpdates: clause.AssignmentColumns([]string{"is_master"}),
+    }).Create(masterDB).Error
+    return err
+}
diff --git a/modular/blocksyncer/database/prefixtree.go b/modular/blocksyncer/database/prefixtree.go
new file mode 100644
index 000000000..226b3f0b0
--- /dev/null
+++ b/modular/blocksyncer/database/prefixtree.go
@@ -0,0 +1,78 @@
+package database
+
+import (
+    "context"
+
+    "github.com/bnb-chain/greenfield-storage-provider/store/bsdb"
+    "github.com/forbole/juno/v4/common"
+    "gorm.io/gorm"
+)
+
+// CreatePrefixTree creates prefix tree nodes from the input slice
+func (db *DB) CreatePrefixTree(ctx context.Context, prefixTree []*bsdb.SlashPrefixTreeNode) error {
+    err := db.Db.WithContext(ctx).Create(&prefixTree).Error
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+// DeletePrefixTree deletes prefix tree nodes by the given conditions
+func (db *DB) DeletePrefixTree(ctx context.Context, prefixTree []*bsdb.SlashPrefixTreeNode) error {
+    if len(prefixTree) == 0 {
+        return nil
+    }
+    tx := db.Db.WithContext(ctx)
+    stmt := tx.Where("bucket_name = ? AND full_name = ? AND is_object = ?",
+        prefixTree[0].BucketName,
+        prefixTree[0].FullName,
+        prefixTree[0].IsObject)
+
+    for _, object := range prefixTree[1:] {
+        stmt = stmt.Or("bucket_name = ? AND full_name = ? AND is_object = ?",
+            object.BucketName,
+            object.FullName,
+            object.IsObject)
+    }
+    err := stmt.Unscoped().Delete(&bsdb.SlashPrefixTreeNode{}).Error
+    if err != nil {
+        return err
+    }
+    return nil
+}
+
+// GetPrefixTree gets a prefix tree node by full name and bucket name
+func (db *DB) GetPrefixTree(ctx context.Context, fullName, bucketName string) (*bsdb.SlashPrefixTreeNode, error) {
+    var prefixTreeNode *bsdb.SlashPrefixTreeNode
+    err := db.Db.WithContext(ctx).Where("full_name = ? AND bucket_name = ? AND is_object = ?", fullName, bucketName, false).Take(&prefixTreeNode).Error
+    if err != nil {
+        if err == gorm.ErrRecordNotFound {
+            return nil, nil
+        }
+        return nil, err
+    }
+    return prefixTreeNode, nil
+}
+
+// GetPrefixTreeObject gets a prefix tree object node by object id
+func (db *DB) GetPrefixTreeObject(ctx context.Context, objectID common.Hash) (*bsdb.SlashPrefixTreeNode, error) {
+    var prefixTreeNode *bsdb.SlashPrefixTreeNode
+    err := db.Db.WithContext(ctx).Where("object_id = ?", objectID).Take(&prefixTreeNode).Error
+    if err != nil {
+        if err == gorm.ErrRecordNotFound {
+            return nil, nil
+        }
+        return nil, err
+    }
+    return prefixTreeNode, nil
+}
+
+// GetPrefixTreeCount gets the prefix tree node count by path and bucket name
+func (db *DB) GetPrefixTreeCount(ctx context.Context, pathName, bucketName string) (int64, error) {
+    var count int64
+    err := db.Db.WithContext(ctx).Table((&bsdb.SlashPrefixTreeNode{}).TableName()).Where("bucket_name = ? AND path_name = ?", bucketName, pathName).Count(&count).Error
+    if err != nil {
+        return 0, err
+    }
+    return count, nil
+}
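
Taken together, these helpers give the event handlers simple create/count/lookup/delete access to prefix tree rows. A rough usage sketch, with made-up bucket and path values:

```go
package database

import (
	"context"

	"github.com/bnb-chain/greenfield-storage-provider/store/bsdb"
)

// exampleUsage is an illustrative walk through the helpers above; the bucket
// and path values are made up.
func exampleUsage(ctx context.Context, db *DB) error {
	folder := &bsdb.SlashPrefixTreeNode{
		PathName:   "/",
		FullName:   "docs/",
		Name:       "docs/",
		IsFolder:   true,
		BucketName: "example-bucket",
	}
	if err := db.CreatePrefixTree(ctx, []*bsdb.SlashPrefixTreeNode{folder}); err != nil {
		return err
	}
	// How many nodes sit directly under "docs/" in this bucket?
	count, err := db.GetPrefixTreeCount(ctx, "docs/", "example-bucket")
	if err != nil {
		return err
	}
	_ = count
	// Remove the node again; rows are matched on bucket_name, full_name and is_object.
	return db.DeletePrefixTree(ctx, []*bsdb.SlashPrefixTreeNode{folder})
}
```
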
diff --git a/modular/blocksyncer/modules/prefixtree/module.go b/modular/blocksyncer/modules/prefixtree/module.go
new file mode 100644
index 000000000..a1806c4e8
--- /dev/null
+++ b/modular/blocksyncer/modules/prefixtree/module.go
@@ -0,0 +1,47 @@
+package prefixtree
+
+import (
+    "context"
+
+    "github.com/forbole/juno/v4/modules"
+    "gorm.io/gorm/schema"
+
+    "github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/database"
+    "github.com/bnb-chain/greenfield-storage-provider/store/bsdb"
+)
+
+const (
+    ModuleName = "prefix_tree"
+)
+
+var (
+    _ modules.Module              = &Module{}
+    _ modules.PrepareTablesModule = &Module{}
+)
+
+// Module represents the prefix tree module
+type Module struct {
+    db *database.DB
+}
+
+// NewModule builds a new Module instance
+func NewModule(db *database.DB) *Module {
+    return &Module{
+        db: db,
+    }
+}
+
+// Name implements modules.Module
+func (m *Module) Name() string {
+    return ModuleName
+}
+
+// PrepareTables implements modules.PrepareTablesModule
+func (m *Module) PrepareTables() error {
+    return m.db.PrepareTables(context.TODO(), []schema.Tabler{&bsdb.SlashPrefixTreeNode{}})
+}
+
+// RecreateTables implements modules.PrepareTablesModule
+func (m *Module) RecreateTables() error {
+    return m.db.RecreateTables(context.TODO(), []schema.Tabler{&bsdb.SlashPrefixTreeNode{}})
+}
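
Because the module also implements modules.PrepareTablesModule, its table is created before syncing starts (and recreated when RecreateTables = true, as localup.sh now sets). Wiring it up by hand, e.g. in a test, would look roughly like this; in the running blocksyncer the registrar below does the equivalent work:

```go
package example

import (
	"github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/database"
	"github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/modules/prefixtree"
)

// prepare is an illustrative manual wiring of the module; db is assumed to come
// from database.BlockSyncerDBBuilder.
func prepare(db *database.DB) error {
	m := prefixtree.NewModule(db)
	return m.PrepareTables()
}
```
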
diff --git a/modular/blocksyncer/modules/prefixtree/prefix_tree.go b/modular/blocksyncer/modules/prefixtree/prefix_tree.go
new file mode 100644
index 000000000..1711f1771
--- /dev/null
+++ b/modular/blocksyncer/modules/prefixtree/prefix_tree.go
@@ -0,0 +1,198 @@
+package prefixtree
+
+import (
+    "context"
+    "errors"
+    "strings"
+
+    storagetypes "github.com/bnb-chain/greenfield/x/storage/types"
+    abci "github.com/cometbft/cometbft/abci/types"
+    tmctypes "github.com/cometbft/cometbft/rpc/core/types"
+    sdk "github.com/cosmos/cosmos-sdk/types"
+    "github.com/cosmos/gogoproto/proto"
+    "github.com/forbole/juno/v4/common"
+    "github.com/forbole/juno/v4/log"
+
+    "github.com/bnb-chain/greenfield-storage-provider/store/bsdb"
+)
+
+var (
+    EventCreateObject       = proto.MessageName(&storagetypes.EventCreateObject{})
+    EventDeleteObject       = proto.MessageName(&storagetypes.EventDeleteObject{})
+    EventCancelCreateObject = proto.MessageName(&storagetypes.EventCancelCreateObject{})
+    EventRejectSealObject   = proto.MessageName(&storagetypes.EventRejectSealObject{})
+)
+
+// buildPrefixTreeEvents maps event types that trigger the creation or deletion of prefix tree nodes.
+// If an event type is present and set to true in this map,
+// it means that event will result in changes to the prefix tree structure.
+var buildPrefixTreeEvents = map[string]bool{
+    EventCreateObject:       true,
+    EventDeleteObject:       true,
+    EventCancelCreateObject: true,
+    EventRejectSealObject:   true,
+}
+
+// HandleEvent handles the events relevant to the building of the PrefixTree.
+// It checks the type of the event and calls the appropriate handler for it.
+func (m *Module) HandleEvent(ctx context.Context, block *tmctypes.ResultBlock, txHash common.Hash, event sdk.Event) error {
+    if !buildPrefixTreeEvents[event.Type] {
+        return nil
+    }
+
+    typedEvent, err := sdk.ParseTypedEvent(abci.Event(event))
+    if err != nil {
+        log.Errorw("parse typed events error", "module", m.Name(), "event", event, "err", err)
+        return err
+    }
+
+    switch event.Type {
+    case EventCreateObject:
+        createObject, ok := typedEvent.(*storagetypes.EventCreateObject)
+        if !ok {
+            log.Errorw("type assert error", "type", "EventCreateObject", "event", typedEvent)
+            return errors.New("create object event assert error")
+        }
+        return m.handleCreateObject(ctx, createObject)
+    case EventDeleteObject:
+        deleteObject, ok := typedEvent.(*storagetypes.EventDeleteObject)
+        if !ok {
+            log.Errorw("type assert error", "type", "EventDeleteObject", "event", typedEvent)
+            return errors.New("delete object event assert error")
+        }
+        return m.handleDeleteObject(ctx, deleteObject)
+    case EventCancelCreateObject:
+        cancelObject, ok := typedEvent.(*storagetypes.EventCancelCreateObject)
+        if !ok {
+            log.Errorw("type assert error", "type", "EventCancelCreateObject", "event", typedEvent)
+            return errors.New("cancel create object event assert error")
+        }
+        return m.handleCancelCreateObject(ctx, cancelObject)
+    case EventRejectSealObject:
+        rejectSealObject, ok := typedEvent.(*storagetypes.EventRejectSealObject)
+        if !ok {
+            log.Errorw("type assert error", "type", "EventRejectSealObject", "event", typedEvent)
+            return errors.New("reject seal object event assert error")
+        }
+        return m.handleRejectSealObject(ctx, rejectSealObject)
+    default:
+        return nil
+    }
+}
+
+// handleCreateObject handles EventCreateObject.
+// It builds the directory tree structure for the object if necessary.
+func (m *Module) handleCreateObject(ctx context.Context, createObject *storagetypes.EventCreateObject) error {
+    var nodes []*bsdb.SlashPrefixTreeNode
+    objectPath := createObject.ObjectName
+    bucketName := createObject.BucketName
+    objectID := createObject.ObjectId
+
+    // Split the full path to get the directories
+    pathParts := strings.Split(objectPath, "/")
+
+    // Traverse from the deepest directory up to the root
+    for i := len(pathParts) - 1; i > 0; i-- {
+        path := strings.Join(pathParts[:i], "/") + "/"
+        // Check if the current directory exists
+        tree, err := m.db.GetPrefixTree(ctx, path, bucketName)
+        if err != nil {
+            log.Errorw("failed to get prefix tree", "error", err)
+            return err
+        }
+        if tree == nil {
+            // If the directory does not exist, create it
+            newNode := &bsdb.SlashPrefixTreeNode{
+                PathName:   strings.Join(pathParts[:i-1], "/") + "/",
+                FullName:   path,
+                Name:       pathParts[i-1] + "/",
+                IsObject:   false,
+                IsFolder:   true,
+                BucketName: bucketName,
+                ObjectName: "",
+            }
+            nodes = append(nodes, newNode)
+        } else {
+            // If the directory exists, we can break the loop
+            break
+        }
+    }
+
+    object, err := m.db.GetPrefixTreeObject(ctx, common.BigToHash(objectID.BigInt()))
+    if err != nil {
+        log.Errorw("failed to get prefix tree object", "error", err)
+        return err
+    }
+    if object == nil {
+        objectNode := &bsdb.SlashPrefixTreeNode{
+            PathName:   strings.Join(pathParts[:len(pathParts)-1], "/") + "/",
+            FullName:   objectPath,
+            Name:       pathParts[len(pathParts)-1],
+            IsObject:   true,
+            IsFolder:   false,
+            BucketName: bucketName,
+            ObjectID:   common.BigToHash(objectID.BigInt()),
+            ObjectName: objectPath,
+        }
+        nodes = append(nodes, objectNode)
+    }
+    if len(nodes) == 0 {
+        return nil
+    }
+    return m.db.CreatePrefixTree(ctx, nodes)
+}
+
+// handleDeleteObject handles EventDeleteObject.
+// It removes the directory tree structure associated with the object.
+func (m *Module) handleDeleteObject(ctx context.Context, deleteObject *storagetypes.EventDeleteObject) error {
+    return m.deleteObject(ctx, deleteObject.ObjectName, deleteObject.BucketName)
+}
+
+// handleCancelCreateObject handles EventCancelCreateObject.
+// It removes the directory tree structure associated with the object.
+func (m *Module) handleCancelCreateObject(ctx context.Context, cancelCreateObject *storagetypes.EventCancelCreateObject) error {
+    return m.deleteObject(ctx, cancelCreateObject.ObjectName, cancelCreateObject.BucketName)
+}
+
+// handleRejectSealObject handles EventRejectSealObject.
+// It removes the directory tree structure associated with the object.
+func (m *Module) handleRejectSealObject(ctx context.Context, rejectSealObject *storagetypes.EventRejectSealObject) error {
+    return m.deleteObject(ctx, rejectSealObject.ObjectName, rejectSealObject.BucketName)
+}
+
+// deleteObject removes the prefix tree nodes associated with the given object path and bucket name.
+func (m *Module) deleteObject(ctx context.Context, objectPath, bucketName string) error {
+    var nodes []*bsdb.SlashPrefixTreeNode
+
+    // Split the full path to get the directories
+    pathParts := strings.Split(objectPath, "/")
+    nodes = append(nodes, &bsdb.SlashPrefixTreeNode{
+        FullName:   objectPath,
+        IsObject:   true,
+        BucketName: bucketName,
+    })
+
+    // Check and delete any empty parent directories
+    for i := len(pathParts) - 1; i > 0; i-- {
+        path := strings.Join(pathParts[:i], "/") + "/"
+        count, err := m.db.GetPrefixTreeCount(ctx, path, bucketName)
+        if err != nil {
+            log.Errorw("failed to get prefix tree count", "error", err)
+            return err
+        }
+        if count <= 1 {
+            nodes = append(nodes, &bsdb.SlashPrefixTreeNode{
+                FullName:   path,
+                IsObject:   false,
+                BucketName: bucketName,
+            })
+        } else {
+            // Found a non-empty directory, stop here
+            break
+        }
+    }
+    if len(nodes) == 0 {
+        return nil
+    }
+    return m.db.DeletePrefixTree(ctx, nodes)
+}
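
To make the traversal concrete: assuming an object "a/b/c.txt" is created in bucket "example-bucket" while the tree for that bucket is still empty, handleCreateObject would insert the following nodes (values are illustrative, ObjectID omitted; folders are discovered deepest-first):

```go
package example

import "github.com/bnb-chain/greenfield-storage-provider/store/bsdb"

// expectedNodes shows the rows handleCreateObject would build for "a/b/c.txt"
// in "example-bucket" when none of its parent folders exist yet.
var expectedNodes = []*bsdb.SlashPrefixTreeNode{
	{PathName: "a/", FullName: "a/b/", Name: "b/", IsFolder: true, BucketName: "example-bucket"},
	{PathName: "/", FullName: "a/", Name: "a/", IsFolder: true, BucketName: "example-bucket"},
	{PathName: "a/b/", FullName: "a/b/c.txt", Name: "c.txt", IsObject: true, BucketName: "example-bucket", ObjectName: "a/b/c.txt"},
}
```
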
diff --git a/modular/blocksyncer/modules/registrar.go b/modular/blocksyncer/modules/registrar.go
new file mode 100644
index 000000000..c1219fdfa
--- /dev/null
+++ b/modular/blocksyncer/modules/registrar.go
@@ -0,0 +1,51 @@
+package registrar
+
+import (
+    "github.com/forbole/juno/v4/modules"
+    "github.com/forbole/juno/v4/modules/block"
+    "github.com/forbole/juno/v4/modules/bucket"
+    "github.com/forbole/juno/v4/modules/epoch"
+    "github.com/forbole/juno/v4/modules/group"
+    "github.com/forbole/juno/v4/modules/messages"
+    "github.com/forbole/juno/v4/modules/object"
+    "github.com/forbole/juno/v4/modules/payment"
+    "github.com/forbole/juno/v4/modules/permission"
+    "github.com/forbole/juno/v4/modules/registrar"
+    sp "github.com/forbole/juno/v4/modules/storage_provider"
+
+    "github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/database"
+    "github.com/bnb-chain/greenfield-storage-provider/modular/blocksyncer/modules/prefixtree"
+)
+
+var (
+    _ registrar.Registrar = &BlockSyncerRegistrar{}
+)
+
+// BlockSyncerRegistrar represents the modules.Registrar that allows to register all modules that are supported by blocksyncer
+type BlockSyncerRegistrar struct {
+    parser messages.MessageAddressesParser
+}
+
+// NewBlockSyncerRegistrar allows to build a new BlockSyncerRegistrar instance
+func NewBlockSyncerRegistrar(parser messages.MessageAddressesParser) *BlockSyncerRegistrar {
+    return &BlockSyncerRegistrar{
+        parser: parser,
+    }
+}
+
+// BuildModules implements modules.Registrar
+func (r *BlockSyncerRegistrar) BuildModules(ctx registrar.Context) modules.Modules {
+    db := database.Cast(ctx.Database)
+
+    return modules.Modules{
+        block.NewModule(db),
+        bucket.NewModule(db),
+        object.NewModule(db),
+        epoch.NewModule(db),
+        payment.NewModule(db),
+        permission.NewModule(db),
+        group.NewModule(db),
+        sp.NewModule(db),
+        prefixtree.NewModule(db),
+    }
+}
diff --git a/modular/gater/metadata_handler.go b/modular/gater/metadata_handler.go
index 27a15b02e..860141d09 100644
--- a/modular/gater/metadata_handler.go
+++ b/modular/gater/metadata_handler.go
@@ -106,6 +106,12 @@ func (g *GateModular) listObjectsByBucketNameHandler(w http.ResponseWriter, r *h
     requestDelimiter = queryParams.Get(model.ListObjectsDelimiterQuery)
     requestPrefix = queryParams.Get(model.ListObjectsPrefixQuery)
 
+    if requestDelimiter != "" && requestDelimiter != "/" {
+        err = ErrInvalidQuery
+        log.CtxErrorw(reqCtx.Context(), "failed to check delimiter", "delimiter", requestDelimiter, "error", err)
+        return
+    }
+
     if err = s3util.CheckValidBucketName(requestBucketName); err != nil {
         log.CtxErrorw(reqCtx.Context(), "failed to check bucket name", "bucket_name", requestBucketName, "error", err)
         return
diff --git a/store/bsdb/common.go b/store/bsdb/common.go
index d0ec8ce80..07448723e 100644
--- a/store/bsdb/common.go
+++ b/store/bsdb/common.go
@@ -4,5 +4,5 @@ package bsdb
 type ListObjectsResult struct {
     PathName   string
     ResultType string
-    Object
+    *Object
 }
diff --git a/store/bsdb/const.go b/store/bsdb/const.go
index dbd9aef31..872ec84bf 100644
--- a/store/bsdb/const.go
+++ b/store/bsdb/const.go
@@ -30,4 +30,12 @@ const (
     GroupTableName = "groups"
     // MasterDBTableName defines the name of master db table
     MasterDBTableName = "master_db"
+    // PrefixTreeTableName defines the name of prefix tree node table
+    PrefixTreeTableName = "slash_prefix_tree_nodes"
+)
+
+// defines the list objects result types
+const (
+    ObjectName   = "object"
+    CommonPrefix = "common_prefix"
 )
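
On the read path, a caller can tell real objects from synthesized common prefixes by ResultType. A hedged sketch of that consumption side; the return shapes are placeholders, not the gateway's real response types:

```go
package example

import "github.com/bnb-chain/greenfield-storage-provider/store/bsdb"

// splitResults separates object entries from common prefixes in a ListObjects result.
func splitResults(results []*bsdb.ListObjectsResult) (objects []*bsdb.Object, prefixes []string) {
	for _, r := range results {
		switch r.ResultType {
		case bsdb.ObjectName:
			objects = append(objects, r.Object)
		case bsdb.CommonPrefix:
			prefixes = append(prefixes, r.PathName)
		}
	}
	return objects, prefixes
}
```
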
"failed to check bucket name", "bucket_name", requestBucketName, "error", err) return diff --git a/store/bsdb/common.go b/store/bsdb/common.go index d0ec8ce80..07448723e 100644 --- a/store/bsdb/common.go +++ b/store/bsdb/common.go @@ -4,5 +4,5 @@ package bsdb type ListObjectsResult struct { PathName string ResultType string - Object + *Object } diff --git a/store/bsdb/const.go b/store/bsdb/const.go index dbd9aef31..872ec84bf 100644 --- a/store/bsdb/const.go +++ b/store/bsdb/const.go @@ -30,4 +30,12 @@ const ( GroupTableName = "groups" // MasterDBTableName defines the name of master db table MasterDBTableName = "master_db" + // PrefixTreeTableName defines the name of prefix tree node table + PrefixTreeTableName = "slash_prefix_tree_nodes" +) + +// define the list objects const +const ( + ObjectName = "object" + CommonPrefix = "common_prefix" ) diff --git a/store/bsdb/filters.go b/store/bsdb/filters.go index 215e854b8..411eb17a4 100644 --- a/store/bsdb/filters.go +++ b/store/bsdb/filters.go @@ -15,3 +15,21 @@ func PrefixFilter(prefix string) func(db *gorm.DB) *gorm.DB { return db.Where("object_name LIKE ?", prefix+"%") } } + +func PathNameFilter(pathName string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + return db.Where("path_name = ?", pathName) + } +} + +func NameFilter(name string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + return db.Where("name like ", name+"%") + } +} + +func FullNameFilter(fullName string) func(db *gorm.DB) *gorm.DB { + return func(db *gorm.DB) *gorm.DB { + return db.Where("full_name >= ?", fullName) + } +} diff --git a/store/bsdb/master_schema.go b/store/bsdb/master_schema.go index f8e7d0840..644d1a4c6 100644 --- a/store/bsdb/master_schema.go +++ b/store/bsdb/master_schema.go @@ -2,9 +2,9 @@ package bsdb // MasterDB stores current master DB type MasterDB struct { - OneRowId bool `gorm:"one_row_id"` + OneRowId bool `gorm:"column:one_row_id;not null;primaryKey"` // IsMaster defines if current DB is master DB - IsMaster bool `gorm:"column:is_master"` + IsMaster bool `gorm:"column:is_master;not null;"` } // TableName is used to set Master table name in database diff --git a/store/bsdb/object.go b/store/bsdb/object.go index 8a60c6a5c..d6b0589d2 100644 --- a/store/bsdb/object.go +++ b/store/bsdb/object.go @@ -31,25 +31,7 @@ func (b *BsDBImpl) ListObjectsByBucketName(bucketName, continuationToken, prefix // 2. Find common prefixes based on the delimiter // 3. Limit results if delimiter != "" { - err = b.db.Raw( - `SELECT path_name, result_type, o.* - FROM ( - SELECT DISTINCT object_name as path_name, 'object' as result_type, id - FROM objects - WHERE bucket_name = ? AND object_name LIKE ? AND object_name >= IF(? = '', '', ?) AND LOCATE(?, SUBSTRING(object_name, LENGTH(?) + 1)) = 0 - UNION - SELECT CONCAT(SUBSTRING(object_name, 1, LENGTH(?) + LOCATE(?, SUBSTRING(object_name, LENGTH(?) + 1)) - 1), ?) as path_name, 'common_prefix' as result_type, MIN(id) - FROM objects - WHERE bucket_name = ? AND object_name LIKE ? AND object_name >= IF(? = '', '', ?) AND LOCATE(?, SUBSTRING(object_name, LENGTH(?) 
diff --git a/store/bsdb/object.go b/store/bsdb/object.go
index 8a60c6a5c..d6b0589d2 100644
--- a/store/bsdb/object.go
+++ b/store/bsdb/object.go
@@ -31,25 +31,7 @@ func (b *BsDBImpl) ListObjectsByBucketName(bucketName, continuationToken, prefix
     // 2. Find common prefixes based on the delimiter
     // 3. Limit results
     if delimiter != "" {
-        err = b.db.Raw(
-            `SELECT path_name, result_type, o.*
-            FROM (
-                SELECT DISTINCT object_name as path_name, 'object' as result_type, id
-                FROM objects
-                WHERE bucket_name = ? AND object_name LIKE ? AND object_name >= IF(? = '', '', ?) AND LOCATE(?, SUBSTRING(object_name, LENGTH(?) + 1)) = 0
-                UNION
-                SELECT CONCAT(SUBSTRING(object_name, 1, LENGTH(?) + LOCATE(?, SUBSTRING(object_name, LENGTH(?) + 1)) - 1), ?) as path_name, 'common_prefix' as result_type, MIN(id)
-                FROM objects
-                WHERE bucket_name = ? AND object_name LIKE ? AND object_name >= IF(? = '', '', ?) AND LOCATE(?, SUBSTRING(object_name, LENGTH(?) + 1)) > 0
-                GROUP BY path_name
-            ) AS subquery
-            JOIN objects o ON subquery.id = o.id
-            ORDER BY path_name
-            LIMIT ?;`,
-            bucketName, prefix+"%", continuationToken, continuationToken, delimiter, prefix,
-            prefix, delimiter, prefix, delimiter,
-            bucketName, prefix+"%", continuationToken, continuationToken, delimiter, prefix,
-            limit).Scan(&results).Error
+        results, err = b.ListObjects(bucketName, continuationToken, prefix, maxKeys)
     } else {
         // If delimiter is not specified, retrieve objects directly
diff --git a/store/bsdb/prefix.go b/store/bsdb/prefix.go
new file mode 100644
index 000000000..d9b7910aa
--- /dev/null
+++ b/store/bsdb/prefix.go
@@ -0,0 +1,117 @@
+package bsdb
+
+import (
+    "path/filepath"
+    "strings"
+
+    "github.com/forbole/juno/v4/common"
+    "gorm.io/gorm"
+)
+
+// ListObjects lists objects by bucket name
+func (b *BsDBImpl) ListObjects(bucketName, continuationToken, prefix string, maxKeys int) ([]*ListObjectsResult, error) {
+    var (
+        nodes       []*SlashPrefixTreeNode
+        filters     []func(*gorm.DB) *gorm.DB
+        res         []*ListObjectsResult
+        pathName    string
+        prefixQuery string
+        limit       int
+        err         error
+    )
+    // query one extra record so the caller can decide whether a NextContinuationToken is needed
+    limit = maxKeys + 1
+    pathName, prefixQuery = processPath(prefix)
+    if pathName != "" {
+        filters = append(filters, PathNameFilter(pathName))
+    }
+    if prefixQuery != "" {
+        filters = append(filters, NameFilter(prefixQuery))
+    }
+    if continuationToken != "" {
+        filters = append(filters, FullNameFilter(continuationToken))
+    }
+    err = b.db.Table((&SlashPrefixTreeNode{}).TableName()).
+        Where("bucket_name = ?", bucketName).
+        Scopes(filters...).
+        Order("full_name").
+        Limit(limit).
+        Find(&nodes).Error
+    if err != nil {
+        return nil, err
+    }
+    res, err = b.filterObjects(nodes)
+    return res, err
+}
+
+// processPath takes in a string that is a path name, and returns two strings:
+// the directory part of the path, and the file part of the path. If the path does not contain
+// a "/", then the directory is "/" and the file is the path.
+func processPath(pathName string) (string, string) {
+    var (
+        dir  string
+        file string
+    )
+
+    if !strings.Contains(pathName, "/") {
+        dir = "/"
+        file = pathName
+    } else {
+        dir, file = filepath.Split(pathName)
+    }
+
+    return dir, file
+}
+
+// filterObjects filters a slice of SlashPrefixTreeNode for nodes whose IsObject attribute is true,
+// maps these objects by their ID and transforms them into the ListObjectsResult format.
+// Returns a slice of ListObjectsResult containing filtered object data or an error if something goes wrong.
+func (b *BsDBImpl) filterObjects(nodes []*SlashPrefixTreeNode) ([]*ListObjectsResult, error) {
+    var (
+        objectIDs  []common.Hash
+        objects    []*Object
+        res        []*ListObjectsResult
+        objectsMap map[common.Hash]*Object
+        err        error
+    )
+
+    // filter objects and query their info
+    for _, node := range nodes {
+        if node.IsObject {
+            objectIDs = append(objectIDs, node.ObjectID)
+        }
+    }
+
+    err = b.db.Table((&Object{}).TableName()).
+        Where("object_id in (?)", objectIDs).
+        Find(&objects).Error
+    if err != nil {
+        return nil, err
+    }
+
+    objectsMap = make(map[common.Hash]*Object)
+    for _, object := range objects {
+        objectsMap[object.ObjectID] = object
+    }
+
+    for _, node := range nodes {
+        if node.IsObject {
+            object := objectsMap[node.ObjectID]
+            if object == nil {
+                continue
+            }
+            res = append(res, &ListObjectsResult{
+                PathName:   node.FullName,
+                ResultType: ObjectName,
+                Object:     object,
+            })
+        } else {
+            res = append(res, &ListObjectsResult{
+                PathName:   node.FullName,
+                ResultType: CommonPrefix,
+                Object:     &Object{},
+            })
+        }
+    }
+    return res, nil
+}
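
processPath splits the request prefix into the directory matched against path_name and the remainder matched as a name prefix; a few worked examples (the behaviour follows filepath.Split):

```go
// Worked examples of processPath:
//
//	processPath("docs/2023/rep") // -> dir "docs/2023/", name prefix "rep"
//	processPath("docs/")         // -> dir "docs/",      name prefix ""
//	processPath("rep")           // -> dir "/",          name prefix "rep"
```
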
diff --git a/store/bsdb/prefix_schema.go b/store/bsdb/prefix_schema.go
new file mode 100644
index 000000000..5d25e5b8c
--- /dev/null
+++ b/store/bsdb/prefix_schema.go
@@ -0,0 +1,25 @@
+package bsdb
+
+import (
+    "github.com/forbole/juno/v4/common"
+)
+
+// SlashPrefixTreeNode is a tree structure based on prefixes
+type SlashPrefixTreeNode struct {
+    ID uint64 `gorm:"column:id;primaryKey"`
+
+    PathName string `gorm:"column:path_name;type:varchar(1024);index:idx_bucket_path,priority:2,length:512"`
+    FullName string `gorm:"column:full_name;type:varchar(1024);index:idx_bucket_full_object,priority:2,length:512"`
+    Name     string `gorm:"column:name;type:varchar(1024)"`
+    IsObject bool   `gorm:"column:is_object;default:false;index:idx_bucket_full_object,priority:3"`
+    IsFolder bool   `gorm:"column:is_folder;default:false"`
+
+    BucketName string      `gorm:"column:bucket_name;type:varchar(64);index:idx_bucket_full_object,priority:1;index:idx_bucket_path,priority:1"`
+    ObjectID   common.Hash `gorm:"column:object_id;type:BINARY(32);index:idx_object_id"`
+    ObjectName string      `gorm:"column:object_name;type:varchar(1024)"`
+}
+
+// TableName is used to set SlashPrefixTreeNode table name in database
+func (*SlashPrefixTreeNode) TableName() string {
+    return PrefixTreeTableName
+}