diff --git a/CHANGELOG.md b/CHANGELOG.md index 9390a7f9e7d8..6991a9b25a5c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -172,6 +172,7 @@ empty coins slice before it is used to create `banktype.MsgSend`. * (authz)[\#11060](https://github.com/cosmos/cosmos-sdk/pull/11060) Support grant with no expire time. * (rosetta) [\#11590](https://github.com/cosmos/cosmos-sdk/pull/11590) Add fee suggestion for rosetta and enable offline mode. Also force set events about Fees to Success to pass reconciliation test. * (types) [\#11959](https://github.com/cosmos/cosmos-sdk/pull/11959) Added `sdk.Coins.Find` helper method to find a coin by denom. +* [\#10174](https://github.com/cosmos/cosmos-sdk/pull/10174) ADR-040: Refactor App to use `v2.MultiStore` ### API Breaking Changes diff --git a/Makefile b/Makefile index c386f74b96c1..3bb01d68838f 100644 --- a/Makefile +++ b/Makefile @@ -70,8 +70,8 @@ ldflags = -X github.com/cosmos/cosmos-sdk/version.Name=sim \ -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(TMVERSION) ifeq ($(ENABLE_ROCKSDB),true) - BUILD_TAGS += rocksdb_build - test_tags += rocksdb_build + BUILD_TAGS += rocksdb + test_tags += rocksdb else $(warning RocksDB support is disabled; to build and test with RocksDB support, set ENABLE_ROCKSDB=true) endif diff --git a/baseapp/abci.go b/baseapp/abci.go index 2aa71a6148b9..d29c79022630 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -2,6 +2,7 @@ package baseapp import ( "crypto/sha256" + "encoding/json" "errors" "fmt" "os" @@ -43,7 +44,7 @@ func (app *BaseApp) InitChain(req abci.RequestInitChain) (res abci.ResponseInitC if req.InitialHeight > 1 { app.initialHeight = req.InitialHeight initHeader = tmproto.Header{ChainID: req.ChainId, Height: req.InitialHeight, Time: req.Time} - err := app.cms.SetInitialVersion(req.InitialHeight) + err := app.cms.SetInitialVersion(uint64(req.InitialHeight)) if err != nil { panic(err) } @@ -144,6 +145,7 @@ func (app *BaseApp) FilterPeerByID(info string) abci.ResponseQuery { // BeginBlock implements the ABCI application interface. func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeginBlock) { + if app.cms.TracingEnabled() { app.cms.SetTracingContext(sdk.TraceContext( map[string]interface{}{"blockHeight": req.Header.Height}, @@ -210,7 +212,7 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg // EndBlock implements the ABCI interface. func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBlock) { if app.deliverState.ms.TracingEnabled() { - app.deliverState.ms = app.deliverState.ms.SetTracingContext(nil).(sdk.CacheMultiStore) + app.deliverState.ms.SetTracingContext(nil) } if app.endBlocker != nil { @@ -269,7 +271,7 @@ func (app *BaseApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { // DeliverTx implements the ABCI interface and executes a tx in DeliverTx mode. // State only gets persisted if all messages are valid and get executed successfully. -// Otherwise, the ResponseDeliverTx will contain releveant error information. +// Otherwise, the ResponseDeliverTx will contain relevant error information. // Regardless of tx execution outcome, the ResponseDeliverTx will contain relevant // gas execution context. func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { @@ -311,7 +313,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { // Write the DeliverTx state into branched storage and commit the MultiStore. 
// The write to the DeliverTx state writes all state transitions to the root - // MultiStore (app.cms) so when Commit() is called is persists those values. + // MultiStore (app.cms) so when Commit() is called it persists those values. app.deliverState.ms.Write() commitID := app.cms.Commit() app.logger.Info("commit synced", "commit", fmt.Sprintf("%X", commitID)) @@ -622,7 +624,7 @@ func (app *BaseApp) createQueryContext(height int64, prove bool) (sdk.Context, e ) } - cacheMS, err := app.cms.CacheMultiStoreWithVersion(height) + version, err := app.cms.GetVersion(height) if err != nil { return sdk.Context{}, sdkerrors.Wrapf( @@ -631,9 +633,9 @@ func (app *BaseApp) createQueryContext(height int64, prove bool) (sdk.Context, e ) } - // branch the commit-multistore for safety ctx := sdk.NewContext( - cacheMS, app.checkState.ctx.BlockHeader(), true, app.logger, + version.CacheWrap(), // branch the commit-multistore for safety + app.checkState.ctx.BlockHeader(), true, app.logger, ).WithMinGasPrices(app.minGasPrices).WithBlockHeight(height) return ctx, nil @@ -746,6 +748,22 @@ func handleQueryApp(app *BaseApp, path []string, req abci.RequestQuery) abci.Res Value: []byte(app.version), } + case "snapshots": + var responseValue []byte + + response := app.ListSnapshots(abci.RequestListSnapshots{}) + + responseValue, err := json.Marshal(response) + if err != nil { + sdkerrors.QueryResult(sdkerrors.Wrap(err, fmt.Sprintf("failed to marshal list snapshots response %v", response)), false) + } + + return abci.ResponseQuery{ + Codespace: sdkerrors.RootCodespace, + Height: req.Height, + Value: responseValue, + } + default: return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unknown query: %s", path), app.trace) } diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go index c33b3c79ecf7..ea7406090cbb 100644 --- a/baseapp/abci_test.go +++ b/baseapp/abci_test.go @@ -7,20 +7,20 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmprototypes "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" + "github.com/cosmos/cosmos-sdk/db/memdb" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/mock" ) func TestGetBlockRentionHeight(t *testing.T) { logger := defaultLogger() - db := dbm.NewMemDB() name := t.Name() - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) + snapshotStore, err := snapshots.NewStore(memdb.NewDB(), testutil.GetTempDir(t)) require.NoError(t, err) testCases := map[string]struct { @@ -30,20 +30,20 @@ func TestGetBlockRentionHeight(t *testing.T) { expected int64 }{ "defaults": { - bapp: NewBaseApp(name, logger, db, nil), + bapp: NewBaseApp(name, logger, memdb.NewDB(), nil), maxAgeBlocks: 0, commitHeight: 499000, expected: 0, }, "pruning unbonding time only": { - bapp: NewBaseApp(name, logger, db, nil, SetMinRetainBlocks(1)), + bapp: NewBaseApp(name, logger, memdb.NewDB(), nil, SetMinRetainBlocks(1)), maxAgeBlocks: 362880, commitHeight: 499000, expected: 136120, }, "pruning iavl snapshot only": { bapp: NewBaseApp( - name, logger, db, nil, + name, logger, memdb.NewDB(), nil, SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), SetMinRetainBlocks(1), SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(10000, 1)), @@ -54,7 
+54,7 @@ func TestGetBlockRentionHeight(t *testing.T) { }, "pruning state sync snapshot only": { bapp: NewBaseApp( - name, logger, db, nil, + name, logger, memdb.NewDB(), nil, SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), SetMinRetainBlocks(1), ), @@ -64,7 +64,7 @@ func TestGetBlockRentionHeight(t *testing.T) { }, "pruning min retention only": { bapp: NewBaseApp( - name, logger, db, nil, + name, logger, memdb.NewDB(), nil, SetMinRetainBlocks(400000), ), maxAgeBlocks: 0, @@ -73,7 +73,7 @@ func TestGetBlockRentionHeight(t *testing.T) { }, "pruning all conditions": { bapp: NewBaseApp( - name, logger, db, nil, + name, logger, memdb.NewDB(), nil, SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), SetMinRetainBlocks(400000), SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), @@ -84,7 +84,7 @@ func TestGetBlockRentionHeight(t *testing.T) { }, "no pruning due to no persisted state": { bapp: NewBaseApp( - name, logger, db, nil, + name, logger, memdb.NewDB(), nil, SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), SetMinRetainBlocks(400000), SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), @@ -95,7 +95,7 @@ func TestGetBlockRentionHeight(t *testing.T) { }, "disable pruning": { bapp: NewBaseApp( - name, logger, db, nil, + name, logger, memdb.NewDB(), nil, SetPruning(pruningtypes.NewCustomPruningOptions(0, 0)), SetMinRetainBlocks(0), SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(50000, 3)), @@ -109,7 +109,7 @@ func TestGetBlockRentionHeight(t *testing.T) { for name, tc := range testCases { tc := tc - tc.bapp.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + tc.bapp.SetParamStore(mock.NewParamStore(memdb.NewDB())) tc.bapp.InitChain(abci.RequestInitChain{ ConsensusParams: &tmprototypes.ConsensusParams{ Evidence: &tmprototypes.EvidenceParams{ @@ -121,6 +121,7 @@ func TestGetBlockRentionHeight(t *testing.T) { t.Run(name, func(t *testing.T) { require.Equal(t, tc.expected, tc.bapp.GetBlockRetentionHeight(tc.commitHeight)) }) + require.NoError(t, tc.bapp.CloseStore()) } } @@ -132,7 +133,7 @@ func TestBaseAppCreateQueryContext(t *testing.T) { t.Parallel() logger := defaultLogger() - db := dbm.NewMemDB() + db := memdb.NewDB() name := t.Name() app := NewBaseApp(name, logger, db, nil) diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 096228614922..d1077fe2ca78 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -9,13 +9,12 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" codectypes "github.com/cosmos/cosmos-sdk/codec/types" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/snapshots" - "github.com/cosmos/cosmos-sdk/store" - "github.com/cosmos/cosmos-sdk/store/rootmulti" - storetypes "github.com/cosmos/cosmos-sdk/store/types" + stypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" @@ -26,6 +25,9 @@ const ( runTxModeReCheck // Recheck a (pending) transaction after a commit runTxModeSimulate // Simulate a transaction runTxModeDeliver // Deliver a transaction + + OptionOrderDefault = iota + OptionOrderAfterStore ) var _ abci.Application = (*BaseApp)(nil) @@ -34,21 +36,38 @@ type ( // Enum mode for app.runTx runTxMode uint8 - // 
StoreLoader defines a customizable function to control how we load the CommitMultiStore - // from disk. This is useful for state migration, when loading a datastore written with - // an older version of the software. In particular, if a module changed the substore key name - // (or removed a substore) between two versions of the software. - StoreLoader func(ms sdk.CommitMultiStore) error + // StoreOption provides a functional callback to modify StoreParams. + // The callback is passed the loaded height as uint64. + // This can be used to control how we load the CommitMultiStore from disk. This is useful for + // state migration, when loading a datastore written with an older version of the software. + // In particular, if a module changed the substore key name (or removed a substore) between + // two versions of the software. + StoreOption func(*multi.StoreParams, uint64) error + + // AppOption provides a configuration option for a BaseApp + AppOption interface { + Apply(*BaseApp) + Order() OptionOrder + } + // OptionOrder represents the required ordering for order dependent options + OptionOrder int + // AppOptionFunc wraps a functional option for BaseApp + AppOptionFunc func(*BaseApp) + // AppOptionOrdered wraps an order-dependent functional option + AppOptionOrdered struct { + AppOptionFunc + order OptionOrder + } ) // BaseApp reflects the ABCI application implementation. type BaseApp struct { // nolint: maligned // initialized on creation logger log.Logger - name string // application name from abci.Info - db dbm.DB // common DB backend + name string // application name from abci.Info + db dbm.Connection + storeOpts []StoreOption // options to configure root store cms sdk.CommitMultiStore // Main (uncached) state - storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() router sdk.Router // handle any kind of legacy message queryRouter sdk.QueryRouter // router for redirecting query calls grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls @@ -135,20 +154,27 @@ type BaseApp struct { // nolint: maligned abciListeners []ABCIListener } +func (opt AppOptionOrdered) Order() OptionOrder { return opt.order } + +func (opt AppOptionFunc) Apply(app *BaseApp) { opt(app) } +func (opt AppOptionFunc) Order() OptionOrder { return OptionOrderDefault } + +// StoreOption implements AppOption, and can be passed to the app constructor. +func (opt StoreOption) Apply(app *BaseApp) { app.storeOpts = append(app.storeOpts, opt) } +func (opt StoreOption) Order() OptionOrder { return OptionOrderDefault } + // NewBaseApp returns a reference to an initialized BaseApp. It accepts a // variadic number of option functions, which act on the BaseApp to set // configuration choices. // // NOTE: The db is used to store the version number for now. 
func NewBaseApp( - name string, logger log.Logger, db dbm.DB, txDecoder sdk.TxDecoder, options ...func(*BaseApp), + name string, logger log.Logger, db dbm.Connection, txDecoder sdk.TxDecoder, options ...AppOption, ) *BaseApp { app := &BaseApp{ logger: logger, name: name, db: db, - cms: store.NewCommitMultiStore(db), - storeLoader: DefaultStoreLoader, router: NewRouter(), queryRouter: NewQueryRouter(), grpcQueryRouter: NewGRPCQueryRouter(), @@ -157,12 +183,20 @@ func NewBaseApp( fauxMerkleMode: false, } + var afterStoreOpts []AppOption for _, option := range options { - option(app) + if int(option.Order()) > int(OptionOrderDefault) { + afterStoreOpts = append(afterStoreOpts, option) + } else { + option.Apply(app) + } } - if app.interBlockCache != nil { - app.cms.SetInterBlockCache(app.interBlockCache) + if err := app.loadStore(); err != nil { + panic(err) + } + for _, option := range afterStoreOpts { + option.Apply(app) } app.runTxRecoveryMiddleware = newDefaultRecoveryMiddleware() @@ -195,121 +229,39 @@ func (app *BaseApp) Trace() bool { return app.trace } -// MsgServiceRouter returns the MsgServiceRouter of a BaseApp. -func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter { return app.msgServiceRouter } - -// SetMsgServiceRouter sets the MsgServiceRouter of a BaseApp. -func (app *BaseApp) SetMsgServiceRouter(msgServiceRouter *MsgServiceRouter) { - app.msgServiceRouter = msgServiceRouter -} - -// MountStores mounts all IAVL or DB stores to the provided keys in the BaseApp -// multistore. -func (app *BaseApp) MountStores(keys ...storetypes.StoreKey) { - for _, key := range keys { - switch key.(type) { - case *storetypes.KVStoreKey: - if !app.fauxMerkleMode { - app.MountStore(key, storetypes.StoreTypeIAVL) - } else { - // StoreTypeDB doesn't do anything upon commit, and it doesn't - // retain history, but it's useful for faster simulation. - app.MountStore(key, storetypes.StoreTypeDB) - } - - case *storetypes.TransientStoreKey: - app.MountStore(key, storetypes.StoreTypeTransient) - - case *storetypes.MemoryStoreKey: - app.MountStore(key, storetypes.StoreTypeMemory) - - default: - panic(fmt.Sprintf("Unrecognized store key type :%T", key)) - } +func (app *BaseApp) loadStore() error { + versions, err := app.db.Versions() + if err != nil { + return err } -} - -// MountKVStores mounts all IAVL or DB stores to the provided keys in the -// BaseApp multistore. -func (app *BaseApp) MountKVStores(keys map[string]*storetypes.KVStoreKey) { - for _, key := range keys { - if !app.fauxMerkleMode { - app.MountStore(key, storetypes.StoreTypeIAVL) - } else { - // StoreTypeDB doesn't do anything upon commit, and it doesn't - // retain history, but it's useful for faster simulation. - app.MountStore(key, storetypes.StoreTypeDB) + latest := versions.Last() + config := multi.DefaultStoreParams() + for _, opt := range app.storeOpts { + if err = opt(&config, latest); err != nil { + return err } } -} - -// MountTransientStores mounts all transient stores to the provided keys in -// the BaseApp multistore. -func (app *BaseApp) MountTransientStores(keys map[string]*storetypes.TransientStoreKey) { - for _, key := range keys { - app.MountStore(key, storetypes.StoreTypeTransient) - } -} - -// MountMemoryStores mounts all in-memory KVStores with the BaseApp's internal -// commit multi-store. 
-func (app *BaseApp) MountMemoryStores(keys map[string]*storetypes.MemoryStoreKey) { - for _, memKey := range keys { - app.MountStore(memKey, storetypes.StoreTypeMemory) - } -} - -// MountStore mounts a store to the provided key in the BaseApp multistore, -// using the default DB. -func (app *BaseApp) MountStore(key storetypes.StoreKey, typ storetypes.StoreType) { - app.cms.MountStoreWithDB(key, typ, nil) -} - -// LoadLatestVersion loads the latest application version. It will panic if -// called more than once on a running BaseApp. -func (app *BaseApp) LoadLatestVersion() error { - err := app.storeLoader(app.cms) + app.cms, err = multi.NewV1MultiStoreAsV2(app.db, config) if err != nil { - return fmt.Errorf("failed to load latest version: %w", err) + return fmt.Errorf("failed to load store: %w", err) } - - return app.Init() -} - -// DefaultStoreLoader will be used by default and loads the latest version -func DefaultStoreLoader(ms sdk.CommitMultiStore) error { - return ms.LoadLatestVersion() + return nil } -// CommitMultiStore returns the root multi-store. -// App constructor can use this to access the `cms`. -// UNSAFE: only safe to use during app initialization. -func (app *BaseApp) CommitMultiStore() sdk.CommitMultiStore { - if app.sealed { - panic("cannot call CommitMultiStore() after baseapp is sealed") - } - return app.cms -} +// MsgServiceRouter returns the MsgServiceRouter of a BaseApp. +func (app *BaseApp) MsgServiceRouter() *MsgServiceRouter { return app.msgServiceRouter } -// SnapshotManager returns the snapshot manager. -// application use this to register extra extension snapshotters. -func (app *BaseApp) SnapshotManager() *snapshots.Manager { - return app.snapshotManager +// SetMsgServiceRouter sets the MsgServiceRouter of a BaseApp. +func (app *BaseApp) SetMsgServiceRouter(msgServiceRouter *MsgServiceRouter) { + app.msgServiceRouter = msgServiceRouter } -// LoadVersion loads the BaseApp application version. It will panic if called -// more than once on a running baseapp. -func (app *BaseApp) LoadVersion(version int64) error { - err := app.cms.LoadVersion(version) - if err != nil { - return fmt.Errorf("failed to load version %d: %w", version, err) - } - - return app.Init() +func (app *BaseApp) CloseStore() error { + return app.cms.Close() } // LastCommitID returns the last CommitID of the multistore. -func (app *BaseApp) LastCommitID() storetypes.CommitID { +func (app *BaseApp) LastCommitID() stypes.CommitID { return app.cms.LastCommitID() } @@ -324,18 +276,13 @@ func (app *BaseApp) LastBlockHeight() int64 { // nil otherwise. Panics if the app is already sealed. 
func (app *BaseApp) Init() error { if app.sealed { - panic("cannot call initFromMainStore: baseapp already sealed") + panic("cannot call Init: baseapp already sealed") } // needed for the export command which inits from store but never calls initchain app.setCheckState(tmproto.Header{}) app.Seal() - - rms, ok := app.cms.(*rootmulti.Store) - if !ok { - return fmt.Errorf("invalid commit multi-store; expected %T, got: %T", &rootmulti.Store{}, app.cms) - } - return rms.GetPruning().Validate() + return app.cms.GetPruning().Validate() } func (app *BaseApp) setMinGasPrices(gasPrices sdk.DecCoins) { @@ -354,10 +301,6 @@ func (app *BaseApp) setMinRetainBlocks(minRetainBlocks uint64) { app.minRetainBlocks = minRetainBlocks } -func (app *BaseApp) setInterBlockCache(cache sdk.MultiStorePersistentCache) { - app.interBlockCache = cache -} - func (app *BaseApp) setTrace(trace bool) { app.trace = trace } @@ -395,7 +338,7 @@ func (app *BaseApp) IsSealed() bool { return app.sealed } // provided header, and minimum gas prices set. It is set on InitChain and reset // on Commit. func (app *BaseApp) setCheckState(header tmproto.Header) { - ms := app.cms.CacheMultiStore() + ms := app.cms.CacheWrap() app.checkState = &state{ ms: ms, ctx: sdk.NewContext(ms, header, true, app.logger).WithMinGasPrices(app.minGasPrices), @@ -407,7 +350,7 @@ func (app *BaseApp) setCheckState(header tmproto.Header) { // and provided header. It is set on InitChain and BeginBlock and set to nil on // Commit. func (app *BaseApp) setDeliverState(header tmproto.Header) { - ms := app.cms.CacheMultiStore() + ms := app.cms.CacheWrap() app.deliverState = &state{ ms: ms, ctx: sdk.NewContext(ms, header, false, app.logger), @@ -571,15 +514,15 @@ func (app *BaseApp) getContextForTx(mode runTxMode, txBytes []byte) sdk.Context func (app *BaseApp) cacheTxContext(ctx sdk.Context, txBytes []byte) (sdk.Context, sdk.CacheMultiStore) { ms := ctx.MultiStore() // TODO: https://github.com/cosmos/cosmos-sdk/issues/2824 - msCache := ms.CacheMultiStore() + msCache := ms.CacheWrap() if msCache.TracingEnabled() { - msCache = msCache.SetTracingContext( + msCache.SetTracingContext( sdk.TraceContext( map[string]interface{}{ "txHash": fmt.Sprintf("%X", tmhash.Sum(txBytes)), }, ), - ).(sdk.CacheMultiStore) + ) } return ctx.WithMultiStore(msCache), msCache diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 5be9331b6260..474e84e50464 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -17,16 +17,18 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" + dbm "github.com/cosmos/cosmos-sdk/db" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - "github.com/cosmos/cosmos-sdk/store/rootmulti" storetypes "github.com/cosmos/cosmos-sdk/store/types" + stypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/mock" "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" @@ -42,10 +44,6 @@ var ( testTxPriority = int64(42) ) -type paramStore struct { - db *dbm.MemDB -} - type 
setupConfig struct { blocks uint64 blockTxs int @@ -54,44 +52,11 @@ type setupConfig struct { pruningOpts pruningtypes.PruningOptions } -func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { - bz, err := json.Marshal(value) - if err != nil { - panic(err) - } - - ps.db.Set(key, bz) -} - -func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { - ok, err := ps.db.Has(key) - if err != nil { - panic(err) - } - - return ok -} - -func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { - bz, err := ps.db.Get(key) - if err != nil { - panic(err) - } - - if len(bz) == 0 { - return - } - - if err := json.Unmarshal(bz, ptr); err != nil { - panic(err) - } -} - func defaultLogger() log.Logger { return log.MustNewDefaultLogger("plain", "info", false).With("module", "sdk/app") } -func newBaseApp(name string, options ...func(*BaseApp)) *BaseApp { +func newBaseApp(name string, options ...AppOption) *BaseApp { logger := defaultLogger() db := dbm.NewMemDB() codec := codec.NewLegacyAmino() @@ -120,15 +85,15 @@ func aminoTxEncoder() sdk.TxEncoder { } // simple one store baseapp -func setupBaseApp(t *testing.T, options ...func(*BaseApp)) *BaseApp { +func setupBaseApp(t *testing.T, options ...AppOption) *BaseApp { + options = append(options, SetSubstores(capKey1, capKey2)) app := newBaseApp(t.Name(), options...) require.Equal(t, t.Name(), app.Name()) - app.MountStores(capKey1, capKey2) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + app.SetParamStore(mock.NewParamStore(dbm.NewMemDB())) // stores are mounted - err := app.LoadLatestVersion() + err := app.Init() require.Nil(t, err) return app } @@ -140,7 +105,7 @@ func setupBaseAppWithSnapshots(t *testing.T, config *setupConfig) (*BaseApp, err routerOpt := func(bapp *BaseApp) { bapp.Router().AddRoute(sdk.NewRoute(routeMsgKeyValue, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { kv := msg.(*msgKeyValue) - bapp.cms.GetCommitKVStore(capKey2).Set(kv.Key, kv.Value) + bapp.cms.GetKVStore(capKey2).Set(kv.Key, kv.Value) return &sdk.Result{}, nil })) } @@ -149,7 +114,10 @@ func setupBaseAppWithSnapshots(t *testing.T, config *setupConfig) (*BaseApp, err snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) require.NoError(t, err) - app := setupBaseApp(t, routerOpt, SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(config.snapshotInterval, uint32(config.snapshotKeepRecent))), SetPruning(config.pruningOpts)) + app := setupBaseApp(t, + AppOptionFunc(routerOpt), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(config.snapshotInterval, uint32(config.snapshotKeepRecent))), + SetPruning(config.pruningOpts)) app.InitChain(abci.RequestInitChain{}) @@ -199,14 +167,13 @@ func TestMountStores(t *testing.T) { app := setupBaseApp(t) // check both stores - store1 := app.cms.GetCommitKVStore(capKey1) + store1 := app.cms.GetKVStore(capKey1) require.NotNil(t, store1) - store2 := app.cms.GetCommitKVStore(capKey2) + store2 := app.cms.GetKVStore(capKey2) require.NotNil(t, store2) } // Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. 
func TestLoadVersion(t *testing.T) { logger := defaultLogger() pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) @@ -215,10 +182,10 @@ func TestLoadVersion(t *testing.T) { app := NewBaseApp(name, logger, db, nil, pruningOpt) // make a cap key and mount the store - err := app.LoadLatestVersion() // needed to make stores non-nil + err := app.Init() // needed to make stores non-nil require.Nil(t, err) - emptyCommitID := storetypes.CommitID{} + emptyCommitID := stypes.CommitID{} // fresh store has zero/empty last commit lastHeight := app.LastBlockHeight() @@ -229,118 +196,54 @@ func TestLoadVersion(t *testing.T) { // execute a block, collect commit ID header := tmproto.Header{Height: 1} app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := storetypes.CommitID{Version: 1, Hash: res.Data} + _ = app.Commit() // execute a block, collect commit ID header = tmproto.Header{Height: 2} app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res = app.Commit() - commitID2 := storetypes.CommitID{Version: 2, Hash: res.Data} - - // reload with LoadLatestVersion - app = NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores() - err = app.LoadLatestVersion() - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(2), commitID2) + res := app.Commit() + commitID2 := stypes.CommitID{Version: 2, Hash: res.Data} + app.CloseStore() - // reload with LoadVersion, see if you can commit the same block and get - // the same result + // reload with latest version app = NewBaseApp(name, logger, db, nil, pruningOpt) - err = app.LoadVersion(1) + err = app.Init() require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() testLoadVersionHelper(t, app, int64(2), commitID2) } -func useDefaultLoader(app *BaseApp) { - app.SetStoreLoader(DefaultStoreLoader) -} - -func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) +func initStore(t *testing.T, db dbm.Connection, storeKey string, k, v []byte) { key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) + opts := multi.DefaultStoreParams() + opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) + require.NoError(t, opts.RegisterSubstore(key, stypes.StoreTypePersistent)) + rs, err := multi.NewV1MultiStoreAsV2(db, opts) + require.NoError(t, err) require.Equal(t, int64(0), rs.LastCommitID().Version) // write some data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) + kv := rs.GetKVStore(key) require.NotNil(t, kv) kv.Set(k, v) commitID := rs.Commit() require.Equal(t, int64(1), commitID.Version) + require.NoError(t, rs.Close()) } -func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) +func checkStore(t *testing.T, db dbm.Connection, ver int64, storeKey string, k, v []byte) { + opts := multi.DefaultStoreParams() + opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) + require.NoError(t, opts.RegisterSubstore(key, 
stypes.StoreTypePersistent)) + rs, err := multi.NewV1MultiStoreAsV2(db, opts) + require.NoError(t, err) require.Equal(t, ver, rs.LastCommitID().Version) // query data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) + kv := rs.GetKVStore(key) require.NotNil(t, kv) require.Equal(t, v, kv.Get(k)) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. -func TestSetLoader(t *testing.T) { - cases := map[string]struct { - setLoader func(*BaseApp) - origStoreKey string - loadStoreKey string - }{ - "don't set loader": { - origStoreKey: "foo", - loadStoreKey: "foo", - }, - "default loader": { - setLoader: useDefaultLoader, - origStoreKey: "foo", - loadStoreKey: "foo", - }, - } - - k := []byte("key") - v := []byte("value") - - for name, tc := range cases { - tc := tc - t.Run(name, func(t *testing.T) { - // prepare a db with some data - db := dbm.NewMemDB() - initStore(t, db, tc.origStoreKey, k, v) - - // load the app with the existing db - opts := []func(*BaseApp){SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))} - if tc.setLoader != nil { - opts = append(opts, tc.setLoader) - } - app := NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) - app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) - err := app.LoadLatestVersion() - require.Nil(t, err) - - // "execute" one block - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) - res := app.Commit() - require.NotNil(t, res.Data) - - // check db is properly updated - checkStore(t, db, 2, tc.loadStoreKey, k, v) - checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) - }) - } + require.NoError(t, rs.Close()) } func TestVersionSetterGetter(t *testing.T) { @@ -349,7 +252,6 @@ func TestVersionSetterGetter(t *testing.T) { db := dbm.NewMemDB() name := t.Name() app := NewBaseApp(name, logger, db, nil, pruningOpt) - require.Equal(t, "", app.Version()) res := app.Query(abci.RequestQuery{Path: "app/version"}) require.True(t, res.IsOK()) @@ -363,54 +265,20 @@ func TestVersionSetterGetter(t *testing.T) { require.Equal(t, versionString, string(res.Value)) } -func TestLoadVersionInvalid(t *testing.T) { - logger := log.NewNopLogger() - pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - err := app.LoadLatestVersion() - require.Nil(t, err) - - // require error when loading an invalid version - err = app.LoadVersion(-1) - require.Error(t, err) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := storetypes.CommitID{Version: 1, Hash: res.Data} - - // create a new app with the stores mounted under the same cap key - app = NewBaseApp(name, logger, db, nil, pruningOpt) - - // require we can load the latest version - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - - // require error when loading an invalid version - err = app.LoadVersion(2) - require.Error(t, err) -} - func TestLoadVersionPruning(t *testing.T) { logger := log.NewNopLogger() pruningOptions := pruningtypes.NewCustomPruningOptions(10, 15) pruningOpt := SetPruning(pruningOptions) + capKey := sdk.NewKVStoreKey("key1") + schemaOpt := SetSubstores(capKey) db := dbm.NewMemDB() name := t.Name() app := NewBaseApp(name, logger, db, nil, pruningOpt) - // make a cap key and mount the store - capKey := sdk.NewKVStoreKey("key1") - 
app.MountStores(capKey) - - err := app.LoadLatestVersion() // needed to make stores non-nil + err := app.Init() // needed to make stores non-nil require.Nil(t, err) - emptyCommitID := storetypes.CommitID{} + emptyCommitID := stypes.CommitID{} // fresh store has zero/empty last commit lastHeight := app.LastBlockHeight() @@ -418,31 +286,35 @@ func TestLoadVersionPruning(t *testing.T) { require.Equal(t, int64(0), lastHeight) require.Equal(t, emptyCommitID, lastID) - var lastCommitID storetypes.CommitID + var lastCommitID stypes.CommitID // Commit seven blocks, of which 7 (latest) is kept in addition to 6, 5 // (keep recent) and 3 (keep every). for i := int64(1); i <= 7; i++ { app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: i}}) res := app.Commit() - lastCommitID = storetypes.CommitID{Version: i, Hash: res.Data} + lastCommitID = stypes.CommitID{Version: i, Hash: res.Data} } + // TODO: behavior change - + // CacheMultiStoreWithVersion returned no error on missing version (?) for _, v := range []int64{1, 2, 4} { - _, err = app.cms.CacheMultiStoreWithVersion(v) - require.NoError(t, err) + s, err := app.cms.GetVersion(v) + require.NotNil(t, s) + require.NoError(t, err, "version=%v", v) } for _, v := range []int64{3, 5, 6, 7} { - _, err = app.cms.CacheMultiStoreWithVersion(v) - require.NoError(t, err) + s, err := app.cms.GetVersion(v) + require.NotNil(t, s) + require.NoError(t, err, "version=%v", v) } + require.NoError(t, app.CloseStore()) - // reload with LoadLatestVersion, check it loads last version - app = NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores(capKey) + // reload app, check it loads last version + app = NewBaseApp(name, logger, db, nil, pruningOpt, schemaOpt) - err = app.LoadLatestVersion() + err = app.Init() require.Nil(t, err) testLoadVersionHelper(t, app, int64(7), lastCommitID) } @@ -458,13 +330,11 @@ func TestOptionFunction(t *testing.T) { logger := defaultLogger() db := dbm.NewMemDB() bap := NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) - require.Equal(t, bap.name, "new name", "BaseApp should have had name changed via option function") + require.Equal(t, "new name", bap.Name(), "BaseApp should have had name changed via option function") } -func testChangeNameHelper(name string) func(*BaseApp) { - return func(bap *BaseApp) { - bap.name = name - } +func testChangeNameHelper(name string) AppOptionFunc { + return func(bap *BaseApp) { bap.SetName(name) } } // Test that txs can be unmarshalled and read and that @@ -511,12 +381,6 @@ func TestBaseAppOptionSeal(t *testing.T) { require.Panics(t, func() { app.SetVersion("") }) - require.Panics(t, func() { - app.SetDB(nil) - }) - require.Panics(t, func() { - app.SetCMS(nil) - }) require.Panics(t, func() { app.SetInitChainer(nil) }) @@ -555,10 +419,10 @@ func TestInitChainer(t *testing.T) { // we can reload the same app later db := dbm.NewMemDB() logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) capKey := sdk.NewKVStoreKey("main") capKey2 := sdk.NewKVStoreKey("key2") - app.MountStores(capKey, capKey2) + schemaOpt := SetSubstores(capKey, capKey2) + app := NewBaseApp(name, logger, db, nil, schemaOpt) // set a value in the store on init chain key, value := []byte("hello"), []byte("goodbye") @@ -576,13 +440,13 @@ func TestInitChainer(t *testing.T) { // initChainer is nil - nothing happens app.InitChain(abci.RequestInitChain{}) res := app.Query(query) - require.Equal(t, 0, len(res.Value)) + require.Nil(t, res.Value) // set initChainer and try again - should 
see the value app.SetInitChainer(initChainer) // stores are mounted and private members are set - sealing baseapp - err := app.LoadLatestVersion() // needed to make stores non-nil + err := app.Init() require.Nil(t, err) require.Equal(t, int64(0), app.LastBlockHeight()) @@ -606,14 +470,16 @@ func TestInitChainer(t *testing.T) { app.Commit() res = app.Query(query) + require.True(t, res.IsOK(), res.Log) require.Equal(t, int64(1), app.LastBlockHeight()) require.Equal(t, value, res.Value) + require.NoError(t, app.CloseStore()) // reload app - app = NewBaseApp(name, logger, db, nil) + app = NewBaseApp(name, logger, db, nil, schemaOpt) app.SetInitChainer(initChainer) - app.MountStores(capKey, capKey2) - err = app.LoadLatestVersion() // needed to make stores non-nil + + err = app.Init() require.Nil(t, err) require.Equal(t, int64(1), app.LastBlockHeight()) @@ -845,7 +711,7 @@ func counterEvent(evType string, msgCount int64) sdk.Events { } } -func handlerMsgCounter(t *testing.T, capKey storetypes.StoreKey, deliverKey []byte) sdk.Handler { +func handlerMsgCounter(t *testing.T, capKey stypes.StoreKey, deliverKey []byte) sdk.Handler { return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { ctx = ctx.WithEventManager(sdk.NewEventManager()) store := ctx.KVStore(capKey) @@ -920,12 +786,13 @@ func TestCheckTx(t *testing.T) { anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) } routerOpt := func(bapp *BaseApp) { // TODO: can remove this once CheckTx doesnt process msgs. - bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - })) + bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, + func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { + return &sdk.Result{}, nil + })) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) nTxs := int64(5) app.InitChain(abci.RequestInitChain{}) @@ -979,7 +846,7 @@ func TestDeliverTx(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) app.InitChain(abci.RequestInitChain{}) // Create same codec used in txDecoder @@ -1035,7 +902,7 @@ func TestMultiMsgDeliverTx(t *testing.T) { bapp.Router().AddRoute(r2) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) // Create same codec used in txDecoder codec := codec.NewLegacyAmino() @@ -1114,7 +981,7 @@ func TestSimulateTx(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) app.InitChain(abci.RequestInitChain{}) @@ -1178,7 +1045,7 @@ func TestRunInvalidTransaction(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) header := tmproto.Header{Height: 1} app.BeginBlock(abci.RequestBeginBlock{Header: header}) @@ -1305,7 +1172,7 @@ func TestTxGasLimits(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) header := tmproto.Header{Height: 1} app.BeginBlock(abci.RequestBeginBlock{Header: header}) @@ -1355,106 +1222,6 @@ func TestTxGasLimits(t *testing.T) { } } -// Test that transactions exceeding gas limits fail -func 
TestMaxBlockGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &tmproto.ConsensusParams{ - Block: &tmproto.BlockParams{ - MaxGas: 100, - }, - }, - }) - - testCases := []struct { - tx *txTest - numDelivers int - gasUsedPerDeliver uint64 - fail bool - failAfterDeliver int - }{ - {newTxCounter(0, 0), 0, 0, false, 0}, - {newTxCounter(9, 1), 2, 10, false, 0}, - {newTxCounter(10, 0), 3, 10, false, 0}, - {newTxCounter(10, 0), 10, 10, false, 0}, - {newTxCounter(2, 7), 11, 9, false, 0}, - {newTxCounter(10, 0), 10, 10, false, 0}, // hit the limit but pass - - {newTxCounter(10, 0), 11, 10, true, 10}, - {newTxCounter(10, 0), 15, 10, true, 10}, - {newTxCounter(9, 0), 12, 9, true, 11}, // fly past the limit - } - - for i, tc := range testCases { - tx := tc.tx - - // reset the block gas - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute the transaction multiple times - for j := 0; j < tc.numDelivers; j++ { - _, result, err := app.SimDeliver(aminoTxEncoder(), tx) - - ctx := app.getState(runTxModeDeliver).ctx - - // check for failed transactions - if tc.fail && (j+1) > tc.failAfterDeliver { - require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - require.True(t, ctx.BlockGasMeter().IsOutOfGas()) - } else { - // check gas used and wanted - blockGasUsed := ctx.BlockGasMeter().GasConsumed() - expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) - require.Equal( - t, expBlockGasUsed, blockGasUsed, - fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), - ) - - require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) - require.False(t, ctx.BlockGasMeter().IsPastLimit()) - } - } - } -} - // Test custom panic handling within app.DeliverTx method func TestCustomRunTxPanicHandler(t *testing.T) { const customPanicMsg = "test panic" @@ -1472,7 +1239,7 @@ func TestCustomRunTxPanicHandler(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) header := tmproto.Header{Height: 1} app.BeginBlock(abci.RequestBeginBlock{Header: header}) @@ -1511,7 +1278,7 @@ func TestBaseAppAnteHandler(t *testing.T) { } cdc := 
codec.NewLegacyAmino() - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) app.InitChain(abci.RequestInitChain{}) registerTestCodec(cdc) @@ -1614,7 +1381,7 @@ func TestGasConsumptionBadTx(t *testing.T) { cdc := codec.NewLegacyAmino() registerTestCodec(cdc) - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) app.InitChain(abci.RequestInitChain{ ConsensusParams: &tmproto.ConsensusParams{ Block: &tmproto.BlockParams{ @@ -1665,7 +1432,7 @@ func TestQuery(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) app.InitChain(abci.RequestInitChain{}) @@ -1713,7 +1480,7 @@ func TestGRPCQuery(t *testing.T) { ) } - app := setupBaseApp(t, grpcQueryOpt) + app := setupBaseApp(t, AppOptionFunc(grpcQueryOpt)) app.GRPCQueryRouter().SetInterfaceRegistry(codectypes.NewInterfaceRegistry()) app.InitChain(abci.RequestInitChain{}) @@ -1756,7 +1523,7 @@ func TestP2PQuery(t *testing.T) { }) } - app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) + app := setupBaseApp(t, AppOptionFunc(addrPeerFilterOpt), AppOptionFunc(idPeerFilterOpt)) addrQuery := abci.RequestQuery{ Path: "/p2p/filter/addr/1.1.1.1:8000", @@ -1801,17 +1568,34 @@ func TestListSnapshots(t *testing.T) { app, err := setupBaseAppWithSnapshots(t, setupConfig) require.NoError(t, err) + expected := abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ + {Height: 4, Format: snapshottypes.CurrentFormat, Chunks: 2}, + {Height: 2, Format: snapshottypes.CurrentFormat, Chunks: 1}, + }} + resp := app.ListSnapshots(abci.RequestListSnapshots{}) - for _, s := range resp.Snapshots { + queryResponse := app.Query(abci.RequestQuery{ + Path: "/app/snapshots", + }) + + queryListSnapshotsResp := abci.ResponseListSnapshots{} + err = json.Unmarshal(queryResponse.Value, &queryListSnapshotsResp) + require.NoError(t, err) + + for i, s := range resp.Snapshots { + querySnapshot := queryListSnapshotsResp.Snapshots[i] + // we check that the query snapshot and function snapshot are equal + // Then we check that the hash and metadata are not empty. We atm + // do not have a good way to generate the expected value for these. 
+ assert.Equal(t, *s, *querySnapshot) assert.NotEmpty(t, s.Hash) assert.NotEmpty(t, s.Metadata) + // Set hash and metadata to nil, so we can check the other snapshot + // fields against expected s.Hash = nil s.Metadata = nil } - assert.Equal(t, abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ - {Height: 4, Format: snapshottypes.CurrentFormat, Chunks: 2}, - {Height: 2, Format: snapshottypes.CurrentFormat, Chunks: 1}, - }}, resp) + assert.Equal(t, expected, resp) } func TestSnapshotWithPruning(t *testing.T) { @@ -2149,7 +1933,7 @@ func TestWithRouter(t *testing.T) { bapp.Router().AddRoute(r) } - app := setupBaseApp(t, anteOpt, routerOpt) + app := setupBaseApp(t, AppOptionFunc(anteOpt), AppOptionFunc(routerOpt)) app.InitChain(abci.RequestInitChain{}) // Create same codec used in txDecoder @@ -2191,7 +1975,7 @@ func TestBaseApp_EndBlock(t *testing.T) { } app := NewBaseApp(name, logger, db, nil) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) + app.SetParamStore(mock.NewParamStore(dbm.NewMemDB())) app.InitChain(abci.RequestInitChain{ ConsensusParams: cp, }) @@ -2210,3 +1994,162 @@ func TestBaseApp_EndBlock(t *testing.T) { require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power) require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas) } + +func TestBaseApp_Init(t *testing.T) { + db := dbm.NewMemDB() + name := t.Name() + logger := defaultLogger() + + snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) + require.NoError(t, err) + + testCases := map[string]struct { + bapp *BaseApp + expectedPruning pruningtypes.PruningOptions + expectedSnapshot snapshottypes.SnapshotOptions + expectedErr error + }{ + "snapshot but pruning unset": { + NewBaseApp(name, logger, db, nil, + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + snapshottypes.NewSnapshotOptions(1500, 2), + // if no pruning is set, the default is PruneNothing + nil, + }, + "pruning everything only": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningEverything)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), + nil, + }, + "pruning nothing only": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), + nil, + }, + "pruning default only": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), + nil, + }, + "pruning custom only": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + ), + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), + nil, + }, + "pruning everything and snapshots": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningEverything)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), + snapshottypes.NewSnapshotOptions(1500, 2), + nil, + }, + "pruning nothing and snapshots": { + 
NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + snapshottypes.NewSnapshotOptions(1500, 2), + nil, + }, + "pruning default and snapshots": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + snapshottypes.NewSnapshotOptions(1500, 2), + nil, + }, + "pruning custom and snapshots": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(1500, 2), + nil, + }, + "error custom pruning 0 interval": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(10, 0)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewCustomPruningOptions(10, 0), + snapshottypes.NewSnapshotOptions(1500, 2), + pruningtypes.ErrPruningIntervalZero, + }, + "error custom pruning too small interval": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(10, 9)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewCustomPruningOptions(10, 9), + snapshottypes.NewSnapshotOptions(1500, 2), + pruningtypes.ErrPruningIntervalTooSmall, + }, + "error custom pruning too small keep recent": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(1, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 2)), + ), + pruningtypes.NewCustomPruningOptions(1, 10), + snapshottypes.NewSnapshotOptions(1500, 2), + pruningtypes.ErrPruningKeepRecentTooSmall, + }, + "snapshot zero interval - manager not set": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(0, 2)), + ), + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(snapshottypes.SnapshotIntervalOff, 0), + nil, + }, + "snapshot zero keep recent - allowed": { + NewBaseApp(name, logger, db, nil, + SetPruning(pruningtypes.NewCustomPruningOptions(10, 10)), + SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(1500, 0)), + ), + pruningtypes.NewCustomPruningOptions(10, 10), + snapshottypes.NewSnapshotOptions(1500, 0), // 0 snapshot-keep-recent means keep all + nil, + }, + } + + for _, tc := range testCases { + // Init and validate + require.Equal(t, tc.expectedErr, tc.bapp.Init()) + if tc.expectedErr != nil { + continue + } + + // Check that settings were set correctly + actualPruning := tc.bapp.cms.GetPruning() + require.Equal(t, tc.expectedPruning, actualPruning) + + if tc.expectedSnapshot.Interval == snapshottypes.SnapshotIntervalOff { + require.Nil(t, tc.bapp.snapshotManager) + continue + } + + require.Equal(t, tc.expectedSnapshot.Interval, tc.bapp.snapshotManager.GetInterval()) + require.Equal(t, tc.expectedSnapshot.KeepRecent, tc.bapp.snapshotManager.GetKeepRecent()) + } +} diff --git a/baseapp/block_gas_test.go b/baseapp/block_gas_test.go index 815fff51fc4b..c6d0f13ef31f 100644 --- a/baseapp/block_gas_test.go +++ b/baseapp/block_gas_test.go @@ 
-10,7 +10,6 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1" "cosmossdk.io/depinject" @@ -20,6 +19,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/runtime" store "github.com/cosmos/cosmos-sdk/store/types" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" @@ -100,8 +100,8 @@ func TestBaseApp_BlockGas(t *testing.T) { &appBuilder) require.NoError(t, err) - bapp := appBuilder.Build(log.NewNopLogger(), dbm.NewMemDB(), nil) - err = bapp.Load(true) + bapp := appBuilder.Build(log.NewNopLogger(), memdb.NewDB(), nil) + err = bapp.Load() require.NoError(t, err) t.Run(tc.name, func(t *testing.T) { diff --git a/baseapp/grpcrouter_test.go b/baseapp/grpcrouter_test.go index 20a88f40f6b4..7fa890d294fa 100644 --- a/baseapp/grpcrouter_test.go +++ b/baseapp/grpcrouter_test.go @@ -7,11 +7,11 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" "cosmossdk.io/depinject" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/testutil/testdata" "github.com/cosmos/cosmos-sdk/testutil/testdata_pulsar" @@ -57,7 +57,7 @@ func TestRegisterQueryServiceTwice(t *testing.T) { var appBuilder *runtime.AppBuilder err := depinject.Inject(makeMinimalConfig(), &appBuilder) require.NoError(t, err) - db := dbm.NewMemDB() + db := memdb.NewDB() app := appBuilder.Build(log.MustNewDefaultLogger("plain", "info", false), db, nil) // First time registering service shouldn't panic. 
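As a usage sketch (not part of the diff itself), the new "snapshots" query path added in baseapp/abci.go above can be exercised the same way TestListSnapshots does: issue an ABCI query against "/app/snapshots" and JSON-decode the response value. The helper name below is illustrative only and assumes an initialized *baseapp.BaseApp is in scope.

import (
	"encoding/json"
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
)

// listSnapshotsViaQuery fetches the snapshot list through the ABCI query
// router instead of calling app.ListSnapshots directly. (Illustrative helper,
// not part of this PR.)
func listSnapshotsViaQuery(app *baseapp.BaseApp) (abci.ResponseListSnapshots, error) {
	res := app.Query(abci.RequestQuery{Path: "/app/snapshots"})
	if !res.IsOK() {
		return abci.ResponseListSnapshots{}, fmt.Errorf("query failed: %s", res.Log)
	}

	// The "snapshots" handler JSON-encodes abci.ResponseListSnapshots into Value.
	var snapshots abci.ResponseListSnapshots
	err := json.Unmarshal(res.Value, &snapshots)
	return snapshots, err
}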
diff --git a/baseapp/msg_service_router_test.go b/baseapp/msg_service_router_test.go index adb12040273c..c93c21bfc159 100644 --- a/baseapp/msg_service_router_test.go +++ b/baseapp/msg_service_router_test.go @@ -7,12 +7,12 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "cosmossdk.io/depinject" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/testutil/testdata" "github.com/cosmos/cosmos-sdk/types/tx/signing" @@ -28,7 +28,7 @@ func TestRegisterMsgService(t *testing.T) { ) err := depinject.Inject(makeMinimalConfig(), &appBuilder, ®istry) require.NoError(t, err) - app := appBuilder.Build(log.MustNewDefaultLogger("plain", "info", false), dbm.NewMemDB(), nil) + app := appBuilder.Build(log.MustNewDefaultLogger("plain", "info", false), memdb.NewDB(), nil) require.Panics(t, func() { testdata.RegisterMsgServer( @@ -56,7 +56,7 @@ func TestRegisterMsgServiceTwice(t *testing.T) { ) err := depinject.Inject(makeMinimalConfig(), &appBuilder, ®istry) require.NoError(t, err) - db := dbm.NewMemDB() + db := memdb.NewDB() app := appBuilder.Build(log.MustNewDefaultLogger("plain", "info", false), db, nil) testdata.RegisterInterfaces(registry) @@ -87,7 +87,7 @@ func TestMsgService(t *testing.T) { ) err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc, &interfaceRegistry) require.NoError(t, err) - app := appBuilder.Build(log.NewNopLogger(), dbm.NewMemDB(), nil) + app := appBuilder.Build(log.NewNopLogger(), memdb.NewDB(), nil) // patch in TxConfig instead of using an output from x/auth/tx txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) diff --git a/baseapp/options.go b/baseapp/options.go index 57abd2b19960..bbbaf16ea5e6 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -4,13 +4,12 @@ import ( "fmt" "io" - dbm "github.com/tendermint/tm-db" - "github.com/cosmos/cosmos-sdk/codec/types" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - "github.com/cosmos/cosmos-sdk/store" + storetypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -18,12 +17,12 @@ import ( // for options that need access to non-exported fields of the BaseApp // SetPruning sets a pruning option on the multistore associated with the app -func SetPruning(opts pruningtypes.PruningOptions) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.cms.SetPruning(opts) } +func SetPruning(opts pruningtypes.PruningOptions) StoreOption { + return func(config *multi.StoreParams, _ uint64) error { config.Pruning = opts; return nil } } // SetMinGasPrices returns an option that sets the minimum gas prices on the app. -func SetMinGasPrices(gasPricesStr string) func(*BaseApp) { +func SetMinGasPrices(gasPricesStr string) AppOptionFunc { gasPrices, err := sdk.ParseDecCoins(gasPricesStr) if err != nil { panic(fmt.Sprintf("invalid minimum gas prices: %v", err)) @@ -33,46 +32,83 @@ func SetMinGasPrices(gasPricesStr string) func(*BaseApp) { } // SetHaltHeight returns a BaseApp option function that sets the halt block height. 
-func SetHaltHeight(blockHeight uint64) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.setHaltHeight(blockHeight) } +func SetHaltHeight(blockHeight uint64) AppOptionFunc { + return func(bap *BaseApp) { bap.setHaltHeight(blockHeight) } } // SetHaltTime returns a BaseApp option function that sets the halt block time. -func SetHaltTime(haltTime uint64) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.setHaltTime(haltTime) } +func SetHaltTime(haltTime uint64) AppOptionFunc { + return func(bap *BaseApp) { bap.setHaltTime(haltTime) } } // SetMinRetainBlocks returns a BaseApp option function that sets the minimum // block retention height value when determining which heights to prune during // ABCI Commit. -func SetMinRetainBlocks(minRetainBlocks uint64) func(*BaseApp) { +func SetMinRetainBlocks(minRetainBlocks uint64) AppOptionFunc { return func(bapp *BaseApp) { bapp.setMinRetainBlocks(minRetainBlocks) } } // SetTrace will turn on or off trace flag -func SetTrace(trace bool) func(*BaseApp) { +func SetTrace(trace bool) AppOptionFunc { return func(app *BaseApp) { app.setTrace(trace) } } // SetIndexEvents provides a BaseApp option function that sets the events to index. -func SetIndexEvents(ie []string) func(*BaseApp) { +func SetIndexEvents(ie []string) AppOptionFunc { return func(app *BaseApp) { app.setIndexEvents(ie) } } -// SetIAVLCacheSize provides a BaseApp option function that sets the size of IAVL cache. -func SetIAVLCacheSize(size int) func(*BaseApp) { - return func(bapp *BaseApp) { bapp.cms.SetIAVLCacheSize(size) } -} - // SetInterBlockCache provides a BaseApp option function that sets the // inter-block cache. -func SetInterBlockCache(cache sdk.MultiStorePersistentCache) func(*BaseApp) { - return func(app *BaseApp) { app.setInterBlockCache(cache) } +func SetInterBlockCache(cache sdk.MultiStorePersistentCache) AppOptionFunc { + opt := func(cfg *multi.StoreParams, v uint64) error { + cfg.PersistentCache = cache + return nil + } + return func(app *BaseApp) { app.storeOpts = append(app.storeOpts, opt) } +} + +// SetSubstores registers substores according to app configuration +func SetSubstores(keys ...storetypes.StoreKey) StoreOption { + return func(config *multi.StoreParams, _ uint64) error { + for _, key := range keys { + typ, err := storetypes.StoreKeyToType(key) + if err != nil { + return err + } + if err = config.RegisterSubstore(key, typ); err != nil { + return err + } + } + return nil + } +} + +func SetSubstoresFromMaps( + keys map[string]*storetypes.KVStoreKey, + tkeys map[string]*storetypes.TransientStoreKey, + memKeys map[string]*storetypes.MemoryStoreKey, +) StoreOption { + return func(params *multi.StoreParams, _ uint64) error { + if err := multi.RegisterSubstoresFromMap(params, keys); err != nil { + return err + } + if err := multi.RegisterSubstoresFromMap(params, tkeys); err != nil { + return err + } + if err := multi.RegisterSubstoresFromMap(params, memKeys); err != nil { + return err + } + return nil + } } // SetSnapshot sets the snapshot store. 
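With options now split between `StoreOption` (applied to `multi.StoreParams` before the v2alpha1 store is opened) and `AppOptionFunc` (applied to the `BaseApp` directly), app construction can mix both kinds. A rough sketch, under the assumption that `NewBaseApp` keeps a variadic option list accepting both flavors and that v1 `sdk.NewKVStoreKey` keys remain compatible with the `store/v2alpha1` `StoreKey` alias:

```go
package main

import (
	"github.com/tendermint/tendermint/libs/log"

	"github.com/cosmos/cosmos-sdk/baseapp"
	"github.com/cosmos/cosmos-sdk/db/memdb"
	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

func newDemoApp() *baseapp.BaseApp {
	// Assumed compatible: v1 store keys satisfying the v2alpha1 StoreKey alias.
	bankKey := sdk.NewKVStoreKey("bank")
	stakingKey := sdk.NewKVStoreKey("staking")

	return baseapp.NewBaseApp(
		"demo", log.NewNopLogger(), memdb.NewDB(), nil,
		// StoreOptions: collected and applied to the store configuration
		// when the v2 multistore is built.
		baseapp.SetSubstores(bankKey, stakingKey),
		baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)),
		// AppOptionFuncs: mutate the BaseApp itself, as before.
		baseapp.SetMinRetainBlocks(1),
		baseapp.SetTrace(false),
	)
}
```

The apparent intent of the split is that nothing touches the multistore until the BaseApp actually opens it; options like `SetInterBlockCache` above simply queue a `StoreOption` for that moment.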
-func SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) func(*BaseApp) { - return func(app *BaseApp) { app.SetSnapshot(snapshotStore, opts) } +func SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) AppOption { + return AppOptionOrdered{ + func(app *BaseApp) { app.SetSnapshot(snapshotStore, opts) }, + OptionOrderAfterStore, + } } func (app *BaseApp) SetName(name string) { @@ -105,22 +141,6 @@ func (app *BaseApp) SetProtocolVersion(v uint64) { app.appVersion = v } -func (app *BaseApp) SetDB(db dbm.DB) { - if app.sealed { - panic("SetDB() on sealed BaseApp") - } - - app.db = db -} - -func (app *BaseApp) SetCMS(cms store.CommitMultiStore) { - if app.sealed { - panic("SetEndBlocker() on sealed BaseApp") - } - - app.cms = cms -} - func (app *BaseApp) SetInitChainer(initChainer sdk.InitChainer) { if app.sealed { panic("SetInitChainer() on sealed BaseApp") @@ -188,16 +208,11 @@ func (app *BaseApp) SetFauxMerkleMode() { // SetCommitMultiStoreTracer sets the store tracer on the BaseApp's underlying // CommitMultiStore. func (app *BaseApp) SetCommitMultiStoreTracer(w io.Writer) { - app.cms.SetTracer(w) -} - -// SetStoreLoader allows us to customize the rootMultiStore initialization. -func (app *BaseApp) SetStoreLoader(loader StoreLoader) { - if app.sealed { - panic("SetStoreLoader() on sealed BaseApp") + opt := func(cfg *multi.StoreParams, v uint64) error { + cfg.TraceWriter = w + return nil } - - app.storeLoader = loader + app.storeOpts = append(app.storeOpts, opt) } // SetRouter allows us to customize the router. diff --git a/baseapp/state.go b/baseapp/state.go index addc89cb342c..90cf7bdba429 100644 --- a/baseapp/state.go +++ b/baseapp/state.go @@ -11,8 +11,8 @@ type state struct { // CacheMultiStore calls and returns a CacheMultiStore on the state's underling // CacheMultiStore. -func (st *state) CacheMultiStore() sdk.CacheMultiStore { - return st.ms.CacheMultiStore() +func (st *state) CacheWrap() sdk.CacheMultiStore { + return st.ms.CacheWrap() } // Context returns the Context of the state. diff --git a/baseapp/test_helpers.go b/baseapp/test_helpers.go index 6489595bc8c1..49081943a2b3 100644 --- a/baseapp/test_helpers.go +++ b/baseapp/test_helpers.go @@ -50,6 +50,15 @@ func (app *BaseApp) NewUncachedContext(isCheckTx bool, header tmproto.Header) sd return sdk.NewContext(app.cms, header, isCheckTx, app.logger) } +// NewContextAt creates a context using a (read-only) store at a given block height. 
+func (app *BaseApp) NewContextAt(isCheckTx bool, header tmproto.Header, height int64) (sdk.Context, error) { + view, err := app.cms.GetVersion(height) + if err != nil { + return sdk.Context{}, err + } + return sdk.NewContext(view.CacheWrap(), header, isCheckTx, app.logger), nil +} + func (app *BaseApp) GetContextForDeliverTx(txBytes []byte) sdk.Context { return app.getContextForTx(runTxModeDeliver, txBytes) } diff --git a/client/grpc_query_test.go b/client/grpc_query_test.go index b0b2b0afbcfe..792039356ec0 100644 --- a/client/grpc_query_test.go +++ b/client/grpc_query_test.go @@ -9,7 +9,6 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -18,6 +17,7 @@ import ( "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/testutil/sims" "github.com/cosmos/cosmos-sdk/testutil/testdata" @@ -55,7 +55,7 @@ func (s *IntegrationTestSuite) SetupSuite() { s.NoError(err) app := appBuilder.Build(log.NewNopLogger(), dbm.NewMemDB(), nil) - err = app.Load(true) + err = app.Load() s.NoError(err) valSet, err := sims.CreateRandomValidatorSet() diff --git a/db/badgerdb/creator.go b/db/badgerdb/creator.go new file mode 100644 index 000000000000..fab6f22760a7 --- /dev/null +++ b/db/badgerdb/creator.go @@ -0,0 +1,17 @@ +// Enabled by default + +package badgerdb + +import ( + "path/filepath" + + "github.com/cosmos/cosmos-sdk/db/types" +) + +func init() { + creator := func(name string, dir string) (types.Connection, error) { + dir = filepath.Join(dir, name) + return NewDB(dir) + } + types.RegisterCreator(types.BadgerDBBackend, creator, false) +} diff --git a/db/badgerdb/db.go b/db/badgerdb/db.go index f5757639ea11..41a55329f5f1 100644 --- a/db/badgerdb/db.go +++ b/db/badgerdb/db.go @@ -11,8 +11,8 @@ import ( "sync" "sync/atomic" - "github.com/cosmos/cosmos-sdk/db" dbutil "github.com/cosmos/cosmos-sdk/db/internal" + "github.com/cosmos/cosmos-sdk/db/types" "github.com/dgraph-io/badger/v3" bpb "github.com/dgraph-io/badger/v3/pb" @@ -22,10 +22,11 @@ import ( var versionsFilename = "versions.csv" var ( - _ db.Connection = (*BadgerDB)(nil) - _ db.Reader = (*badgerTxn)(nil) - _ db.Writer = (*badgerWriter)(nil) - _ db.ReadWriter = (*badgerWriter)(nil) + _ types.Connection = (*BadgerDB)(nil) + _ types.Reader = (*badgerTxn)(nil) + _ types.Writer = (*badgerWriter)(nil) + _ types.ReadWriter = (*badgerWriter)(nil) + _ types.VersionSet = (*versionManager)(nil) ) // BadgerDB is a connection to a BadgerDB key-value database. @@ -62,7 +63,7 @@ type badgerIterator struct { // commit to see current state. So we must use commit increments that are more // granular than our version interval, and map versions to the corresponding timestamp. type versionManager struct { - *db.VersionManager + *types.VersionManager vmap map[uint64]uint64 lastTs uint64 } @@ -81,8 +82,7 @@ func NewDB(dir string) (*BadgerDB, error) { return NewDBWithOptions(opts) } -// NewDBWithOptions creates a BadgerDB key-value database with the specified Options -// (https://pkg.go.dev/github.com/dgraph-io/badger/v3#Options) +// NewDBWithOptions creates a BadgerDB key-value database with the specified Options. 
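`NewContextAt` gives tests and tooling a read-only context pinned to a committed height, instead of manually cache-wrapping the commit multistore. A sketch of how it might be used; the bank keeper call and the `"stake"` denom are purely illustrative:

```go
package main

import (
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"

	"github.com/cosmos/cosmos-sdk/baseapp"
	sdk "github.com/cosmos/cosmos-sdk/types"
	bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper"
)

// balanceAt reads a balance from a past version without touching live state.
func balanceAt(app *baseapp.BaseApp, bk bankkeeper.Keeper, addr sdk.AccAddress, height int64) (sdk.Coin, error) {
	ctx, err := app.NewContextAt(true, tmproto.Header{Height: height}, height)
	if err != nil {
		// Fails if the requested version was pruned or never committed.
		return sdk.Coin{}, err
	}
	return bk.GetBalance(ctx, addr, "stake"), nil
}
```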
func NewDBWithOptions(opts badger.Options) (*BadgerDB, error) { d, err := badger.OpenManaged(opts) if err != nil { @@ -111,6 +111,7 @@ func readVersionsFile(path string) (*versionManager, error) { if err != nil { return nil, err } + file.Close() var ( versions []uint64 lastTs uint64 @@ -127,11 +128,12 @@ func readVersionsFile(path string) (*versionManager, error) { } if version == 0 { // 0 maps to the latest timestamp lastTs = ts + continue } versions = append(versions, version) vmap[version] = ts } - vmgr := db.NewVersionManager(versions) + vmgr := types.NewVersionManager(versions) return &versionManager{ VersionManager: vmgr, vmap: vmap, @@ -141,12 +143,6 @@ func readVersionsFile(path string) (*versionManager, error) { // Write version metadata to CSV file func writeVersionsFile(vm *versionManager, path string) error { - file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE, 0o644) - if err != nil { - return err - } - defer file.Close() - w := csv.NewWriter(file) rows := [][]string{ {"0", strconv.FormatUint(vm.lastTs, 10)}, } @@ -161,27 +157,33 @@ func writeVersionsFile(vm *versionManager, path string) error { strconv.FormatUint(ts, 10), }) } + file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return err + } + defer file.Close() + w := csv.NewWriter(file) return w.WriteAll(rows) } -func (b *BadgerDB) Reader() db.Reader { +func (b *BadgerDB) Reader() types.Reader { b.mtx.RLock() ts := b.vmgr.lastTs b.mtx.RUnlock() return &badgerTxn{txn: b.db.NewTransactionAt(ts, false), db: b} } -func (b *BadgerDB) ReaderAt(version uint64) (db.Reader, error) { +func (b *BadgerDB) ReaderAt(version uint64) (types.Reader, error) { b.mtx.RLock() defer b.mtx.RUnlock() ts, has := b.vmgr.versionTs(version) if !has { - return nil, db.ErrVersionDoesNotExist + return nil, types.ErrVersionDoesNotExist } return &badgerTxn{txn: b.db.NewTransactionAt(ts, false), db: b}, nil } -func (b *BadgerDB) ReadWriter() db.ReadWriter { +func (b *BadgerDB) ReadWriter() types.ReadWriter { atomic.AddInt32(&b.openWriters, 1) b.mtx.RLock() ts := b.vmgr.lastTs @@ -189,7 +191,7 @@ func (b *BadgerDB) ReadWriter() db.ReadWriter { return &badgerWriter{badgerTxn{txn: b.db.NewTransactionAt(ts, true), db: b}, false} } -func (b *BadgerDB) Writer() db.Writer { +func (b *BadgerDB) Writer() types.Writer { // Badger has a WriteBatch, but it doesn't support conflict detection return b.ReadWriter() } @@ -197,13 +199,16 @@ func (b *BadgerDB) Writer() db.Writer { func (b *BadgerDB) Close() error { b.mtx.Lock() defer b.mtx.Unlock() - writeVersionsFile(b.vmgr, filepath.Join(b.db.Opts().Dir, versionsFilename)) + err := writeVersionsFile(b.vmgr, filepath.Join(b.db.Opts().Dir, versionsFilename)) + if err != nil { + return err + } return b.db.Close() } // Versions implements Connection. // Returns a VersionSet that is valid until the next call to SaveVersion or DeleteVersion. -func (b *BadgerDB) Versions() (db.VersionSet, error) { +func (b *BadgerDB) Versions() (types.VersionSet, error) { b.mtx.RLock() defer b.mtx.RUnlock() return b.vmgr, nil @@ -213,10 +218,14 @@ func (b *BadgerDB) save(target uint64) (uint64, error) { b.mtx.Lock() defer b.mtx.Unlock() if b.openWriters > 0 { - return 0, db.ErrOpenTransactions + return 0, types.ErrOpenTransactions } b.vmgr = b.vmgr.Copy() - return b.vmgr.Save(target) + v, err := b.vmgr.Save(target) + if err != nil { + return v, err + } + return v, writeVersionsFile(b.vmgr, filepath.Join(b.db.Opts().Dir, versionsFilename)) } // SaveNextVersion implements Connection. 
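Since `save` now persists `versions.csv` on every successful save rather than only in `Close`, a saved version survives an unclean shutdown. A minimal end-to-end sketch of the save-and-read-back flow (the directory path is a placeholder):

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/badgerdb"
)

func main() {
	conn, err := badgerdb.NewDB("/tmp/badger-example") // placeholder path
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	w := conn.Writer()
	if err := w.Set([]byte("k"), []byte("v1")); err != nil {
		panic(err)
	}
	if err := w.Commit(); err != nil {
		panic(err)
	}

	// Saving now also writes versions.csv, so this version is recoverable
	// even if the process dies before Close runs.
	v, err := conn.SaveNextVersion()
	if err != nil {
		panic(err)
	}

	// Read the saved version back through a read-only transaction.
	r, err := conn.ReaderAt(v)
	if err != nil {
		panic(err)
	}
	defer r.Discard()

	val, err := r.Get([]byte("k"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("version %d: k=%s\n", v, val)
}
```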
@@ -227,7 +236,7 @@ func (b *BadgerDB) SaveNextVersion() (uint64, error) { // SaveVersion implements Connection. func (b *BadgerDB) SaveVersion(target uint64) error { if target == 0 { - return db.ErrInvalidVersion + return types.ErrInvalidVersion } _, err := b.save(target) return err @@ -237,18 +246,18 @@ func (b *BadgerDB) DeleteVersion(target uint64) error { b.mtx.Lock() defer b.mtx.Unlock() if !b.vmgr.Exists(target) { - return db.ErrVersionDoesNotExist + return types.ErrVersionDoesNotExist } b.vmgr = b.vmgr.Copy() b.vmgr.Delete(target) - return nil + return writeVersionsFile(b.vmgr, filepath.Join(b.db.Opts().Dir, versionsFilename)) } func (b *BadgerDB) Revert() error { b.mtx.RLock() defer b.mtx.RUnlock() if b.openWriters > 0 { - return db.ErrOpenTransactions + return types.ErrOpenTransactions } // Revert from latest commit timestamp to last "saved" timestamp @@ -263,6 +272,34 @@ func (b *BadgerDB) Revert() error { return errors.New("bad version history") } } + return b.revert(target) +} + +// RevertTo reverts the DB to a target version +func (b *BadgerDB) RevertTo(ver uint64) error { + b.mtx.RLock() + defer b.mtx.RUnlock() + if b.openWriters > 0 { + return types.ErrOpenTransactions + } + + // Revert from latest commit timestamp to target timestamp + if !b.vmgr.Exists(ver) { + return types.ErrVersionDoesNotExist + } + targetTs, has := b.vmgr.versionTs(ver) + if !has { + return errors.New("bad version history") + } + if err := b.revert(targetTs); err != nil { + return err + } + b.vmgr.DeleteAbove(ver) + return nil +} + +// reverts to a target timestamp +func (b *BadgerDB) revert(target uint64) error { lastTs := b.vmgr.lastTs if target == lastTs { return nil @@ -325,7 +362,7 @@ func (b *BadgerDB) Stats() map[string]string { return nil } func (tx *badgerTxn) Get(key []byte) ([]byte, error) { if len(key) == 0 { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } item, err := tx.txn.Get(key) @@ -343,7 +380,7 @@ func (tx *badgerTxn) Get(key []byte) ([]byte, error) { func (tx *badgerTxn) Has(key []byte) (bool, error) { if len(key) == 0 { - return false, db.ErrKeyEmpty + return false, types.ErrKeyEmpty } _, err := tx.txn.Get(key) @@ -357,14 +394,34 @@ func (tx *badgerWriter) Set(key, value []byte) error { if err := dbutil.ValidateKv(key, value); err != nil { return err } - return tx.txn.Set(key, value) + err := tx.txn.Set(key, value) + if errors.Is(err, badger.ErrTxnTooBig) { + err = tx.Commit() + if err != nil { + return err + } + newtx := tx.db.ReadWriter().(*badgerWriter) + *tx = *newtx + err = tx.txn.Set(key, value) + } + return err } func (tx *badgerWriter) Delete(key []byte) error { if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty + } + err := tx.txn.Delete(key) + if errors.Is(err, badger.ErrTxnTooBig) { + err = tx.Commit() + if err != nil { + return err + } + newtx := tx.db.ReadWriter().(*badgerWriter) + *tx = *newtx + err = tx.txn.Delete(key) } - return tx.txn.Delete(key) + return err } func (tx *badgerWriter) Commit() (err error) { @@ -396,7 +453,7 @@ func (tx *badgerWriter) Discard() error { func (tx *badgerTxn) iteratorOpts(start, end []byte, opts badger.IteratorOptions) (*badgerIterator, error) { if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } iter := tx.txn.NewIterator(opts) iter.Rewind() @@ -414,12 +471,12 @@ func (tx *badgerTxn) iteratorOpts(start, end []byte, opts badger.IteratorOptions }, nil } -func (tx *badgerTxn) Iterator(start, end []byte) (db.Iterator, 
error) { +func (tx *badgerTxn) Iterator(start, end []byte) (types.Iterator, error) { opts := badger.DefaultIteratorOptions return tx.iteratorOpts(start, end, opts) } -func (tx *badgerTxn) ReverseIterator(start, end []byte) (db.Iterator, error) { +func (tx *badgerTxn) ReverseIterator(start, end []byte) (types.Iterator, error) { opts := badger.DefaultIteratorOptions opts.Reverse = true return tx.iteratorOpts(end, start, opts) @@ -516,3 +573,12 @@ func (vm *versionManager) Delete(target uint64) { vm.VersionManager.Delete(target) delete(vm.vmap, target) } + +func (vm *versionManager) DeleteAbove(target uint64) { + vm.VersionManager.DeleteAbove(target) + for v, _ := range vm.vmap { + if v > target { + delete(vm.vmap, v) + } + } +} diff --git a/db/badgerdb/db_test.go b/db/badgerdb/db_test.go index e043117c7a61..d80dae5fd2be 100644 --- a/db/badgerdb/db_test.go +++ b/db/badgerdb/db_test.go @@ -5,11 +5,11 @@ import ( "github.com/stretchr/testify/require" - "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/db/dbtest" + "github.com/cosmos/cosmos-sdk/db/types" ) -func load(t *testing.T, dir string) db.Connection { +func load(t *testing.T, dir string) types.Connection { d, err := NewDB(dir) require.NoError(t, err) return d @@ -39,3 +39,16 @@ func TestRevert(t *testing.T) { func TestReloadDB(t *testing.T) { dbtest.DoTestReloadDB(t, load) } + +func TestVersionManager(t *testing.T) { + new := func(vs []uint64) types.VersionSet { + vmap := map[uint64]uint64{} + var lastTs uint64 + for _, v := range vs { + vmap[v] = v + lastTs = v + } + return &versionManager{types.NewVersionManager(vs), vmap, lastTs} + } + dbtest.DoTestVersionSet(t, new) +} diff --git a/db/dbtest/benchmark.go b/db/dbtest/benchmark.go index cb4a77ba5a81..fdfed250710a 100644 --- a/db/dbtest/benchmark.go +++ b/db/dbtest/benchmark.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" - dbm "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" ) func Int64ToBytes(i int64) []byte { @@ -21,7 +21,7 @@ func BytesToInt64(buf []byte) int64 { return int64(binary.BigEndian.Uint64(buf)) } -func BenchmarkRangeScans(b *testing.B, db dbm.ReadWriter, dbSize int64) { +func BenchmarkRangeScans(b *testing.B, db types.ReadWriter, dbSize int64) { b.StopTimer() rangeSize := int64(10000) @@ -40,7 +40,7 @@ func BenchmarkRangeScans(b *testing.B, db dbm.ReadWriter, dbSize int64) { b.StartTimer() for i := 0; i < b.N; i++ { - start := rand.Int63n(dbSize - rangeSize) + start := rand.Int63n(dbSize - rangeSize) // nolint: gosec end := start + rangeSize iter, err := db.Iterator(Int64ToBytes(start), Int64ToBytes(end)) require.NoError(b, err) @@ -53,7 +53,7 @@ func BenchmarkRangeScans(b *testing.B, db dbm.ReadWriter, dbSize int64) { } } -func BenchmarkRandomReadsWrites(b *testing.B, db dbm.ReadWriter) { +func BenchmarkRandomReadsWrites(b *testing.B, db types.ReadWriter) { b.StopTimer() // create dummy data @@ -67,7 +67,7 @@ func BenchmarkRandomReadsWrites(b *testing.B, db dbm.ReadWriter) { for i := 0; i < b.N; i++ { { - idx := rand.Int63n(numItems) + idx := rand.Int63n(numItems) // nolint: gosec internal[idx]++ val := internal[idx] idxBytes := Int64ToBytes(idx) @@ -80,7 +80,7 @@ func BenchmarkRandomReadsWrites(b *testing.B, db dbm.ReadWriter) { } { - idx := rand.Int63n(numItems) + idx := rand.Int63n(numItems) // nolint: gosec valExp := internal[idx] idxBytes := Int64ToBytes(idx) valBytes, err := db.Get(idxBytes) diff --git a/db/dbtest/testcases.go b/db/dbtest/testcases.go index d80196a1b400..b80b77168bca 100644 --- 
a/db/dbtest/testcases.go +++ b/db/dbtest/testcases.go @@ -8,10 +8,10 @@ import ( "github.com/stretchr/testify/require" - dbm "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" ) -type Loader func(*testing.T, string) dbm.Connection +type Loader func(*testing.T, string) types.Connection func ikey(i int) []byte { return []byte(fmt.Sprintf("key-%03d", i)) } func ival(i int) []byte { return []byte(fmt.Sprintf("val-%03d", i)) } @@ -20,9 +20,10 @@ func DoTestGetSetHasDelete(t *testing.T, load Loader) { t.Helper() db := load(t, t.TempDir()) - var txn dbm.ReadWriter - view := db.Reader() + var txn types.ReadWriter + var view types.Reader + view = db.Reader() require.NotNil(t, view) // A nonexistent key should return nil. @@ -84,28 +85,28 @@ func DoTestGetSetHasDelete(t *testing.T, load Loader) { // Setting, getting, and deleting an empty key should error. _, err = txn.Get([]byte{}) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) _, err = txn.Get(nil) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) _, err = txn.Has([]byte{}) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) _, err = txn.Has(nil) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) err = txn.Set([]byte{}, []byte{0x01}) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) err = txn.Set(nil, []byte{0x01}) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) err = txn.Delete([]byte{}) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) err = txn.Delete(nil) - require.Equal(t, dbm.ErrKeyEmpty, err) + require.Equal(t, types.ErrKeyEmpty, err) // Setting a nil value should error, but an empty value is fine. 
err = txn.Set([]byte("x"), nil) - require.Equal(t, dbm.ErrValueNil, err) + require.Equal(t, types.ErrValueNil, err) err = txn.Set([]byte("x"), []byte{}) require.NoError(t, err) @@ -139,19 +140,19 @@ func DoTestIterators(t *testing.T, load Loader) { } require.NoError(t, txn.Commit()) - testRange := func(t *testing.T, iter dbm.Iterator, expected []string) { + type testCase struct { + start, end []byte + expected []string + } + testRange := func(t *testing.T, iter types.Iterator, tc testCase) { i := 0 for ; iter.Next(); i++ { - expectedValue := expected[i] + expectedValue := tc.expected[i] value := iter.Value() - require.Equal(t, expectedValue, string(value), "i=%v", i) + require.Equal(t, expectedValue, string(value), + "i=%v case=[[%x] [%x])", i, tc.start, tc.end) } - require.Equal(t, len(expected), i) - } - - type testCase struct { - start, end []byte - expected []string + require.Equal(t, len(tc.expected), i) } view := db.Reader() @@ -164,11 +165,10 @@ func DoTestIterators(t *testing.T, load Loader) { {[]byte{0x00, 0x01}, []byte{0x01}, []string{"0 1", "0 2"}}, {nil, []byte{0x01}, []string{"0", "0 0", "0 1", "0 2"}}, } - for i, tc := range iterCases { - t.Logf("Iterator case %d: [%v, %v)", i, tc.start, tc.end) + for _, tc := range iterCases { it, err := view.Iterator(tc.start, tc.end) require.NoError(t, err) - testRange(t, it, tc.expected) + testRange(t, it, tc) it.Close() } @@ -180,11 +180,10 @@ func DoTestIterators(t *testing.T, load Loader) { {[]byte{0x00, 0x01}, []byte{0x01}, []string{"0 2", "0 1"}}, {nil, []byte{0x01}, []string{"0 2", "0 1", "0 0", "0"}}, } - for i, tc := range reverseCases { - t.Logf("ReverseIterator case %d: [%v, %v)", i, tc.start, tc.end) + for _, tc := range reverseCases { it, err := view.ReverseIterator(tc.start, tc.end) require.NoError(t, err) - testRange(t, it, tc.expected) + testRange(t, it, tc) it.Close() } @@ -260,12 +259,12 @@ func DoTestVersioning(t *testing.T, load Loader) { require.False(t, has) require.NoError(t, view.Discard()) - view, err = db.ReaderAt(versions.Last() + 1) //nolint:staticcheck // we nolint here because we are checking for the absence of an error. - require.Equal(t, dbm.ErrVersionDoesNotExist, err, "should fail to read a nonexistent version") + view, err = db.ReaderAt(versions.Last() + 1) + require.Equal(t, types.ErrVersionDoesNotExist, err, "should fail to read a nonexistent version") require.NoError(t, db.DeleteVersion(v2), "should delete version v2") - view, err = db.ReaderAt(v2) //nolint:staticcheck // we nolint here because we are checking for the absence of an error. 
- require.Equal(t, dbm.ErrVersionDoesNotExist, err) + view, err = db.ReaderAt(v2) + require.Equal(t, types.ErrVersionDoesNotExist, err) // Ensure latest version is accurate prev := v3 @@ -296,10 +295,10 @@ func DoTestVersioning(t *testing.T, load Loader) { func DoTestTransactions(t *testing.T, load Loader, multipleWriters bool) { t.Helper() db := load(t, t.TempDir()) - // Both methods should work in a DBWriter context - writerFuncs := []func() dbm.Writer{ + // Both methods should work in a Writer context + writerFuncs := []func() types.Writer{ db.Writer, - func() dbm.Writer { return db.ReadWriter() }, + func() types.Writer { return db.ReadWriter() }, } for _, getWriter := range writerFuncs { @@ -322,7 +321,7 @@ func DoTestTransactions(t *testing.T, load Loader, multipleWriters bool) { tx := getWriter() require.NoError(t, tx.Set([]byte("0"), []byte("a"))) _, err := db.SaveNextVersion() - require.Equal(t, dbm.ErrOpenTransactions, err) + require.Equal(t, types.ErrOpenTransactions, err) require.NoError(t, tx.Discard()) }) @@ -396,7 +395,7 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) { t.Helper() dirname := t.TempDir() db := load(t, dirname) - var txn dbm.Writer + var txn types.Writer initContents := func() { txn = db.Writer() @@ -413,6 +412,8 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) { } initContents() + require.Error(t, db.RevertTo(0)) // RevertTo(0) is not allowed - user must use Revert() + require.Error(t, db.RevertTo(10)) // non-existent version require.NoError(t, db.Revert()) view := db.Reader() it, err := view.Iterator(nil, nil) @@ -422,7 +423,7 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) { require.NoError(t, view.Discard()) initContents() - _, err = db.SaveNextVersion() + v1, err := db.SaveNextVersion() require.NoError(t, err) // get snapshot of db state @@ -443,7 +444,7 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) { require.NoError(t, err) for it.Next() { val, has := state[string(it.Key())] - require.True(t, has, "key should not be present: %v => %v", it.Key(), it.Value()) + require.True(t, has, "unexpected key: %v => %v", it.Key(), it.Value()) require.Equal(t, val, it.Value()) count++ } @@ -452,7 +453,7 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) { view.Discard() } - changeContents := func() { + modifyContents := func() { txn = db.Writer() require.NoError(t, txn.Set([]byte{3}, []byte{15})) require.NoError(t, txn.Set([]byte{7}, []byte{70})) @@ -464,48 +465,54 @@ func DoTestRevert(t *testing.T, load Loader, reload bool) { txn = db.Writer() require.NoError(t, txn.Set([]byte{3}, []byte{30})) require.NoError(t, txn.Set([]byte{8}, []byte{8})) - require.NoError(t, txn.Delete([]byte{9})) + require.NoError(t, txn.Delete([]byte{9})) // redundant delete require.NoError(t, txn.Commit()) } - changeContents() + modifyContents() - if reload { - db.Close() - db = load(t, dirname) + cases := []func(types.Connection) error{ + func(db types.Connection) error { return db.Revert() }, + func(db types.Connection) error { return db.RevertTo(v1) }, } + for _, revertFunc := range cases { + if reload { + db.Close() + db = load(t, dirname) + } - txn = db.Writer() - require.Error(t, db.Revert()) // can't revert with open writers - txn.Discard() - require.NoError(t, db.Revert()) + txn = db.Writer() + require.Error(t, revertFunc(db)) // can't revert with open writers + txn.Discard() + require.NoError(t, db.Revert()) - if reload { - db.Close() - db = load(t, dirname) - } + if reload { + db.Close() + db = load(t, dirname) + } - 
checkContents() + checkContents() - // With intermediate versions added & deleted, revert again to v1 - changeContents() - v2, _ := db.SaveNextVersion() + // With intermediate versions added & deleted, revert again to v1 + modifyContents() + v2, _ := db.SaveNextVersion() - txn = db.Writer() - require.NoError(t, txn.Delete([]byte{6})) - require.NoError(t, txn.Set([]byte{8}, []byte{9})) - require.NoError(t, txn.Set([]byte{11}, []byte{11})) - txn.Commit() - v3, _ := db.SaveNextVersion() + txn = db.Writer() + require.NoError(t, txn.Delete([]byte{6})) + require.NoError(t, txn.Set([]byte{8}, []byte{9})) + require.NoError(t, txn.Set([]byte{11}, []byte{11})) + txn.Commit() + v3, _ := db.SaveNextVersion() - txn = db.Writer() - require.NoError(t, txn.Set([]byte{12}, []byte{12})) - txn.Commit() + txn = db.Writer() + require.NoError(t, txn.Set([]byte{12}, []byte{12})) + txn.Commit() - db.DeleteVersion(v2) - db.DeleteVersion(v3) - db.Revert() - checkContents() + db.DeleteVersion(v2) + db.DeleteVersion(v3) + revertFunc(db) + checkContents() + } require.NoError(t, db.Close()) } diff --git a/db/dbtest/util.go b/db/dbtest/util.go index 66bc208bf7ad..c567988f1559 100644 --- a/db/dbtest/util.go +++ b/db/dbtest/util.go @@ -6,47 +6,47 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" ) -func AssertNext(t *testing.T, itr dbm.Iterator, expected bool) { +func AssertNext(t *testing.T, itr types.Iterator, expected bool) { t.Helper() require.Equal(t, expected, itr.Next()) } -func AssertDomain(t *testing.T, itr dbm.Iterator, start, end []byte) { +func AssertDomain(t *testing.T, itr types.Iterator, start, end []byte) { t.Helper() ds, de := itr.Domain() assert.Equal(t, start, ds, "checkDomain domain start incorrect") assert.Equal(t, end, de, "checkDomain domain end incorrect") } -func AssertItem(t *testing.T, itr dbm.Iterator, key, value []byte) { +func AssertItem(t *testing.T, itr types.Iterator, key, value []byte) { t.Helper() assert.Exactly(t, key, itr.Key()) assert.Exactly(t, value, itr.Value()) } -func AssertInvalid(t *testing.T, itr dbm.Iterator) { +func AssertInvalid(t *testing.T, itr types.Iterator) { t.Helper() AssertNext(t, itr, false) AssertKeyPanics(t, itr) AssertValuePanics(t, itr) } -func AssertKeyPanics(t *testing.T, itr dbm.Iterator) { +func AssertKeyPanics(t *testing.T, itr types.Iterator) { t.Helper() assert.Panics(t, func() { itr.Key() }, "checkKeyPanics expected panic but didn't") } -func AssertValue(t *testing.T, db dbm.Reader, key, valueWanted []byte) { +func AssertValue(t *testing.T, db types.Reader, key, valueWanted []byte) { t.Helper() valueGot, err := db.Get(key) assert.NoError(t, err) assert.Equal(t, valueWanted, valueGot) } -func AssertValuePanics(t *testing.T, itr dbm.Iterator) { +func AssertValuePanics(t *testing.T, itr types.Iterator) { t.Helper() assert.Panics(t, func() { itr.Value() }) } diff --git a/db/version_manager_test.go b/db/dbtest/version_set.go similarity index 66% rename from db/version_manager_test.go rename to db/dbtest/version_set.go index 6575c7e1758c..7bdcc2cd01ec 100644 --- a/db/version_manager_test.go +++ b/db/dbtest/version_set.go @@ -1,4 +1,4 @@ -package db_test +package dbtest import ( "sort" @@ -6,12 +6,12 @@ import ( "github.com/stretchr/testify/require" - "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" ) -// Test that VersionManager satisfies the behavior of VersionSet -func TestVersionManager(t *testing.T) { - vm := 
db.NewVersionManager(nil) +// Test that a type satisfies the behavior of VersionSet +func DoTestVersionSet(t *testing.T, new func([]uint64) types.VersionSet) { + vm := types.NewVersionManager(nil) require.Equal(t, uint64(0), vm.Last()) require.Equal(t, 0, vm.Count()) require.True(t, vm.Equal(vm)) @@ -42,11 +42,7 @@ func TestVersionManager(t *testing.T) { require.Equal(t, id2, vm.Initial()) require.Equal(t, id3, vm.Last()) - var all []uint64 - for it := vm.Iterator(); it.Next(); { - all = append(all, it.Value()) - } - sort.Slice(all, func(i, j int) bool { return all[i] < all[j] }) + all := allVersions(vm) require.Equal(t, []uint64{id2, id3}, all) vmc := vm.Copy() @@ -54,6 +50,20 @@ func TestVersionManager(t *testing.T) { require.NoError(t, err) require.False(t, vm.Exists(id5)) // true copy is made - vm2 := db.NewVersionManager([]uint64{id2, id3}) + vm2 := types.NewVersionManager([]uint64{id2, id3}) require.True(t, vm.Equal(vm2)) + + vm = types.NewVersionManager([]uint64{1, 2, 3, 5, 10}) + vm.DeleteAbove(10) + require.Equal(t, []uint64{1, 2, 3, 5, 10}, allVersions(vm)) + vm.DeleteAbove(4) + require.Equal(t, []uint64{1, 2, 3}, allVersions(vm)) +} + +func allVersions(vm *types.VersionManager) (all []uint64) { + for it := vm.Iterator(); it.Next(); { + all = append(all, it.Value()) + } + sort.Slice(all, func(i, j int) bool { return all[i] < all[j] }) + return } diff --git a/db/internal/backends/imports.go b/db/internal/backends/imports.go new file mode 100644 index 000000000000..2069e37c3865 --- /dev/null +++ b/db/internal/backends/imports.go @@ -0,0 +1,8 @@ +// This is a dummy package used to trigger initialization of backend creators +package backends + +import ( + _ "github.com/cosmos/cosmos-sdk/db/badgerdb" + _ "github.com/cosmos/cosmos-sdk/db/memdb" + _ "github.com/cosmos/cosmos-sdk/db/rocksdb" +) diff --git a/db/internal/util.go b/db/internal/util.go index ea7425319ce3..cc35d33e6ace 100644 --- a/db/internal/util.go +++ b/db/internal/util.go @@ -3,15 +3,15 @@ package util import ( "fmt" - dbm "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" ) func ValidateKv(key, value []byte) error { if len(key) == 0 { - return dbm.ErrKeyEmpty + return types.ErrKeyEmpty } if value == nil { - return dbm.ErrValueNil + return types.ErrValueNil } return nil } diff --git a/db/memdb/creator.go b/db/memdb/creator.go new file mode 100644 index 000000000000..a1ad082ca997 --- /dev/null +++ b/db/memdb/creator.go @@ -0,0 +1,13 @@ +// No build directive, memdb is always built +package memdb + +import ( + "github.com/cosmos/cosmos-sdk/db/types" +) + +func init() { + creator := func(name string, dir string) (types.Connection, error) { + return NewDB(), nil + } + types.RegisterCreator(types.MemDBBackend, creator, false) +} diff --git a/db/memdb/db.go b/db/memdb/db.go index fd558876ac3d..0343fefb06f7 100644 --- a/db/memdb/db.go +++ b/db/memdb/db.go @@ -6,8 +6,8 @@ import ( "sync" "sync/atomic" - "github.com/cosmos/cosmos-sdk/db" dbutil "github.com/cosmos/cosmos-sdk/db/internal" + "github.com/cosmos/cosmos-sdk/db/types" "github.com/google/btree" ) @@ -32,7 +32,7 @@ type MemDB struct { btree *btree.BTree // Main contents mtx sync.RWMutex // Guards version history saved map[uint64]*btree.BTree // Past versions - vmgr *db.VersionManager // Mirrors version keys + vmgr *types.VersionManager // Mirrors version keys openWriters int32 // Open writers } @@ -43,10 +43,10 @@ type dbTxn struct { type dbWriter struct{ dbTxn } var ( - _ db.Connection = (*MemDB)(nil) - _ db.Reader = (*dbTxn)(nil) - _ db.Writer = 
(*dbWriter)(nil) - _ db.ReadWriter = (*dbWriter)(nil) + _ types.Connection = (*MemDB)(nil) + _ types.Reader = (*dbTxn)(nil) + _ types.Writer = (*dbWriter)(nil) + _ types.ReadWriter = (*dbWriter)(nil) ) // item is a btree.Item with byte slices as keys and values @@ -60,7 +60,7 @@ func NewDB() *MemDB { return &MemDB{ btree: btree.New(bTreeDegree), saved: make(map[uint64]*btree.BTree), - vmgr: db.NewVersionManager(nil), + vmgr: types.NewVersionManager(nil), } } @@ -68,7 +68,7 @@ func (dbm *MemDB) newTxn(tree *btree.BTree) dbTxn { return dbTxn{tree, dbm} } -// Close implements DB. +// Close implements Connection. // Close is a noop since for an in-memory database, we don't have a destination to flush // contents to nor do we want any data loss on invoking Close(). // See the discussion in https://github.com/tendermint/tendermint/libs/pull/56 @@ -77,14 +77,14 @@ func (dbm *MemDB) Close() error { } // Versions implements Connection. -func (dbm *MemDB) Versions() (db.VersionSet, error) { +func (dbm *MemDB) Versions() (types.VersionSet, error) { dbm.mtx.RLock() defer dbm.mtx.RUnlock() return dbm.vmgr, nil } // Reader implements Connection. -func (dbm *MemDB) Reader() db.Reader { +func (dbm *MemDB) Reader() types.Reader { dbm.mtx.RLock() defer dbm.mtx.RUnlock() ret := dbm.newTxn(dbm.btree) @@ -92,24 +92,24 @@ func (dbm *MemDB) Reader() db.Reader { } // ReaderAt implements Connection. -func (dbm *MemDB) ReaderAt(version uint64) (db.Reader, error) { +func (dbm *MemDB) ReaderAt(version uint64) (types.Reader, error) { dbm.mtx.RLock() defer dbm.mtx.RUnlock() tree, ok := dbm.saved[version] if !ok { - return nil, db.ErrVersionDoesNotExist + return nil, types.ErrVersionDoesNotExist } ret := dbm.newTxn(tree) return &ret, nil } // Writer implements Connection. -func (dbm *MemDB) Writer() db.Writer { +func (dbm *MemDB) Writer() types.Writer { return dbm.ReadWriter() } // ReadWriter implements Connection. -func (dbm *MemDB) ReadWriter() db.ReadWriter { +func (dbm *MemDB) ReadWriter() types.ReadWriter { dbm.mtx.RLock() defer dbm.mtx.RUnlock() atomic.AddInt32(&dbm.openWriters, 1) @@ -121,7 +121,7 @@ func (dbm *MemDB) save(target uint64) (uint64, error) { dbm.mtx.Lock() defer dbm.mtx.Unlock() if dbm.openWriters > 0 { - return 0, db.ErrOpenTransactions + return 0, types.ErrOpenTransactions } newVmgr := dbm.vmgr.Copy() @@ -142,7 +142,7 @@ func (dbm *MemDB) SaveNextVersion() (uint64, error) { // SaveNextVersion implements Connection. 
func (dbm *MemDB) SaveVersion(target uint64) error { if target == 0 { - return db.ErrInvalidVersion + return types.ErrInvalidVersion } _, err := dbm.save(target) return err @@ -153,7 +153,7 @@ func (dbm *MemDB) DeleteVersion(target uint64) error { dbm.mtx.Lock() defer dbm.mtx.Unlock() if _, has := dbm.saved[target]; !has { - return db.ErrVersionDoesNotExist + return types.ErrVersionDoesNotExist } delete(dbm.saved, target) dbm.vmgr = dbm.vmgr.Copy() @@ -165,34 +165,53 @@ func (dbm *MemDB) Revert() error { dbm.mtx.RLock() defer dbm.mtx.RUnlock() if dbm.openWriters > 0 { - return db.ErrOpenTransactions + return types.ErrOpenTransactions } - last := dbm.vmgr.Last() if last == 0 { dbm.btree = btree.New(bTreeDegree) return nil } + return dbm.revert(last) +} + +func (dbm *MemDB) RevertTo(target uint64) error { + dbm.mtx.RLock() + defer dbm.mtx.RUnlock() + if dbm.openWriters > 0 { + return types.ErrOpenTransactions + } + if !dbm.vmgr.Exists(target) { + return types.ErrVersionDoesNotExist + } + err := dbm.revert(target) + if err != nil { + dbm.vmgr.DeleteAbove(target) + } + return err +} + +func (dbm *MemDB) revert(target uint64) error { var has bool - dbm.btree, has = dbm.saved[last] + dbm.btree, has = dbm.saved[target] if !has { - return fmt.Errorf("bad version history: version %v not saved", last) + return fmt.Errorf("bad version history: version %v not saved", target) } - for ver := range dbm.saved { - if ver > last { + for ver, _ := range dbm.saved { + if ver > target { delete(dbm.saved, ver) } } return nil } -// Get implements DBReader. +// Get implements Reader. func (tx *dbTxn) Get(key []byte) ([]byte, error) { if tx.btree == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if len(key) == 0 { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } i := tx.btree.Get(newKey(key)) if i != nil { @@ -201,21 +220,21 @@ func (tx *dbTxn) Get(key []byte) ([]byte, error) { return nil, nil } -// Has implements DBReader. +// Has implements Reader. func (tx *dbTxn) Has(key []byte) (bool, error) { if tx.btree == nil { - return false, db.ErrTransactionClosed + return false, types.ErrTransactionClosed } if len(key) == 0 { - return false, db.ErrKeyEmpty + return false, types.ErrKeyEmpty } return tx.btree.Has(newKey(key)), nil } -// Set implements DBWriter. +// Set implements Writer. func (tx *dbWriter) Set(key []byte, value []byte) error { if tx.btree == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } if err := dbutil.ValidateKv(key, value); err != nil { return err @@ -224,46 +243,46 @@ func (tx *dbWriter) Set(key []byte, value []byte) error { return nil } -// Delete implements DBWriter. +// Delete implements Writer. func (tx *dbWriter) Delete(key []byte) error { if tx.btree == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } tx.btree.Delete(newKey(key)) return nil } -// Iterator implements DBReader. +// Iterator implements Reader. // Takes out a read-lock on the database until the iterator is closed. 
-func (tx *dbTxn) Iterator(start, end []byte) (db.Iterator, error) { +func (tx *dbTxn) Iterator(start, end []byte) (types.Iterator, error) { if tx.btree == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } return newMemDBIterator(tx, start, end, false), nil } -// ReverseIterator implements DBReader. +// ReverseIterator implements Reader. // Takes out a read-lock on the database until the iterator is closed. -func (tx *dbTxn) ReverseIterator(start, end []byte) (db.Iterator, error) { +func (tx *dbTxn) ReverseIterator(start, end []byte) (types.Iterator, error) { if tx.btree == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } return newMemDBIterator(tx, start, end, true), nil } -// Commit implements DBWriter. +// Commit implements Writer. func (tx *dbWriter) Commit() error { if tx.btree == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } tx.db.mtx.Lock() defer tx.db.mtx.Unlock() @@ -271,7 +290,7 @@ func (tx *dbWriter) Commit() error { return tx.Discard() } -// Discard implements DBReader. +// Discard implements Reader. func (tx *dbTxn) Discard() error { if tx.btree != nil { tx.btree = nil @@ -279,7 +298,7 @@ func (tx *dbTxn) Discard() error { return nil } -// Discard implements DBWriter. +// Discard implements Writer. func (tx *dbWriter) Discard() error { if tx.btree != nil { defer atomic.AddInt32(&tx.db.openWriters, -1) diff --git a/db/memdb/db_test.go b/db/memdb/db_test.go index e330718ca6cb..c52fec72dad2 100644 --- a/db/memdb/db_test.go +++ b/db/memdb/db_test.go @@ -3,8 +3,8 @@ package memdb import ( "testing" - "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/db/dbtest" + "github.com/cosmos/cosmos-sdk/db/types" ) func BenchmarkMemDBRangeScans1M(b *testing.B) { @@ -28,7 +28,7 @@ func BenchmarkMemDBRandomReadsWrites(b *testing.B) { dbtest.BenchmarkRandomReadsWrites(b, dbm.ReadWriter()) } -func load(t *testing.T, _ string) db.Connection { +func load(t *testing.T, _ string) types.Connection { return NewDB() } diff --git a/db/memdb/iterator.go b/db/memdb/iterator.go index 7d2f89721127..c0a215ec07d4 100644 --- a/db/memdb/iterator.go +++ b/db/memdb/iterator.go @@ -4,7 +4,7 @@ import ( "bytes" "context" - "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" "github.com/google/btree" ) @@ -24,7 +24,7 @@ type memDBIterator struct { end []byte } -var _ db.Iterator = (*memDBIterator)(nil) +var _ types.Iterator = (*memDBIterator)(nil) // newMemDBIterator creates a new memDBIterator. // A visitor is passed to the btree which streams items to the iterator over a channel. 
Advancing diff --git a/db/prefix/prefix.go b/db/prefix/prefix.go index 2d218cedcbe5..43522d4add1f 100644 --- a/db/prefix/prefix.go +++ b/db/prefix/prefix.go @@ -3,36 +3,36 @@ package prefix import ( - "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" ) // prefixed Reader type Reader struct { - db db.Reader + db types.Reader prefix []byte } // prefixed ReadWriter type ReadWriter struct { - db db.ReadWriter + db types.ReadWriter prefix []byte } // prefixed Writer type Writer struct { - db db.Writer + db types.Writer prefix []byte } var ( - _ db.Reader = (*Reader)(nil) - _ db.ReadWriter = (*ReadWriter)(nil) - _ db.Writer = (*Writer)(nil) + _ types.Reader = (*Reader)(nil) + _ types.ReadWriter = (*ReadWriter)(nil) + _ types.Writer = (*Writer)(nil) ) // NewReadereader returns a DBReader that only has access to the subset of DB keys // that contain the given prefix. -func NewReader(dbr db.Reader, prefix []byte) Reader { +func NewReader(dbr types.Reader, prefix []byte) Reader { return Reader{ prefix: prefix, db: dbr, @@ -41,7 +41,7 @@ func NewReader(dbr db.Reader, prefix []byte) Reader { // NewReadWriter returns a DBReader that only has access to the subset of DB keys // that contain the given prefix. -func NewReadWriter(dbrw db.ReadWriter, prefix []byte) ReadWriter { +func NewReadWriter(dbrw types.ReadWriter, prefix []byte) ReadWriter { return ReadWriter{ prefix: prefix, db: dbrw, @@ -50,7 +50,7 @@ func NewReadWriter(dbrw db.ReadWriter, prefix []byte) ReadWriter { // NewWriterriter returns a DBWriter that reads/writes only from the subset of DB keys // that contain the given prefix -func NewWriter(dbw db.Writer, prefix []byte) Writer { +func NewWriter(dbw types.Writer, prefix []byte) Writer { return Writer{ prefix: prefix, db: dbw, @@ -64,7 +64,7 @@ func prefixed(prefix, key []byte) []byte { // Get implements DBReader. func (pdb Reader) Get(key []byte) ([]byte, error) { if len(key) == 0 { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } return pdb.db.Get(prefixed(pdb.prefix, key)) } @@ -72,15 +72,15 @@ func (pdb Reader) Get(key []byte) ([]byte, error) { // Has implements DBReader. func (pdb Reader) Has(key []byte) (bool, error) { if len(key) == 0 { - return false, db.ErrKeyEmpty + return false, types.ErrKeyEmpty } return pdb.db.Has(prefixed(pdb.prefix, key)) } // Iterator implements DBReader. -func (pdb Reader) Iterator(start, end []byte) (db.Iterator, error) { +func (pdb Reader) Iterator(start, end []byte) (types.Iterator, error) { if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } var pend []byte @@ -97,9 +97,9 @@ func (pdb Reader) Iterator(start, end []byte) (db.Iterator, error) { } // ReverseIterator implements DBReader. -func (pdb Reader) ReverseIterator(start, end []byte) (db.Iterator, error) { +func (pdb Reader) ReverseIterator(start, end []byte) (types.Iterator, error) { if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } var pend []byte @@ -121,7 +121,7 @@ func (pdb Reader) Discard() error { return pdb.db.Discard() } // Set implements DBReadWriter. func (pdb ReadWriter) Set(key []byte, value []byte) error { if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } return pdb.db.Set(prefixed(pdb.prefix, key), value) } @@ -129,7 +129,7 @@ func (pdb ReadWriter) Set(key []byte, value []byte) error { // Delete implements DBReadWriter. 
func (pdb ReadWriter) Delete(key []byte) error { if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } return pdb.db.Delete(prefixed(pdb.prefix, key)) } @@ -145,12 +145,12 @@ func (pdb ReadWriter) Has(key []byte) (bool, error) { } // Iterator implements DBReadWriter. -func (pdb ReadWriter) Iterator(start, end []byte) (db.Iterator, error) { +func (pdb ReadWriter) Iterator(start, end []byte) (types.Iterator, error) { return NewReader(pdb.db, pdb.prefix).Iterator(start, end) } // ReverseIterator implements DBReadWriter. -func (pdb ReadWriter) ReverseIterator(start, end []byte) (db.Iterator, error) { +func (pdb ReadWriter) ReverseIterator(start, end []byte) (types.Iterator, error) { return NewReader(pdb.db, pdb.prefix).ReverseIterator(start, end) } @@ -163,7 +163,7 @@ func (pdb ReadWriter) Discard() error { return pdb.db.Discard() } // Set implements DBReadWriter. func (pdb Writer) Set(key []byte, value []byte) error { if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } return pdb.db.Set(prefixed(pdb.prefix, key), value) } @@ -171,7 +171,7 @@ func (pdb Writer) Set(key []byte, value []byte) error { // Delete implements DBWriter. func (pdb Writer) Delete(key []byte) error { if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } return pdb.db.Delete(prefixed(pdb.prefix, key)) } diff --git a/db/reexport.go b/db/reexport.go new file mode 100644 index 000000000000..05214a42e234 --- /dev/null +++ b/db/reexport.go @@ -0,0 +1,32 @@ +package db + +import ( + _ "github.com/cosmos/cosmos-sdk/db/internal/backends" + "github.com/cosmos/cosmos-sdk/db/memdb" + "github.com/cosmos/cosmos-sdk/db/types" +) + +type ( + Connection = types.Connection + Reader = types.Reader + Writer = types.Writer + ReadWriter = types.ReadWriter + Iterator = types.Iterator + VersionSet = types.VersionSet + VersionIterator = types.VersionIterator + BackendType = types.BackendType +) + +var ( + ErrVersionDoesNotExist = types.ErrVersionDoesNotExist + + MemDBBackend = types.MemDBBackend + RocksDBBackend = types.RocksDBBackend + BadgerDBBackend = types.BadgerDBBackend + + NewDB = types.NewDB + ReaderAsReadWriter = types.ReaderAsReadWriter + NewVersionManager = types.NewVersionManager + + NewMemDB = memdb.NewDB +) diff --git a/db/rocksdb/batch.go b/db/rocksdb/batch.go index 22818ed35d4e..6f497ede3e29 100644 --- a/db/rocksdb/batch.go +++ b/db/rocksdb/batch.go @@ -1,12 +1,10 @@ -//go:build rocksdb_build - package rocksdb import ( "sync/atomic" - "github.com/cosmos/cosmos-sdk/db" dbutil "github.com/cosmos/cosmos-sdk/db/internal" + "github.com/cosmos/cosmos-sdk/db/types" "github.com/cosmos/gorocksdb" ) @@ -15,7 +13,7 @@ type rocksDBBatch struct { mgr *dbManager } -var _ db.Writer = (*rocksDBBatch)(nil) +var _ types.Writer = (*rocksDBBatch)(nil) func (mgr *dbManager) newRocksDBBatch() *rocksDBBatch { return &rocksDBBatch{ @@ -30,7 +28,7 @@ func (b *rocksDBBatch) Set(key, value []byte) error { return err } if b.batch == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } b.batch.Put(key, value) return nil @@ -39,10 +37,10 @@ func (b *rocksDBBatch) Set(key, value []byte) error { // Delete implements Writer. func (b *rocksDBBatch) Delete(key []byte) error { if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } if b.batch == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } b.batch.Delete(key) return nil @@ -51,7 +49,7 @@ func (b *rocksDBBatch) Delete(key []byte) error { // Write implements Writer. 
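The prefix wrappers keep their behaviour under the new `db/types` interfaces; the aliases added in `db/reexport.go` re-export the same names under the old `db` import path. A small sketch against the in-memory backend:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/db/prefix"
	"github.com/cosmos/cosmos-sdk/db/types"
)

func main() {
	var conn types.Connection = memdb.NewDB()

	w := conn.Writer()
	if err := w.Set([]byte("acc/alice"), []byte{1}); err != nil {
		panic(err)
	}
	if err := w.Commit(); err != nil {
		panic(err)
	}

	// The prefixed reader only sees (and strips) keys under "acc/".
	r := prefix.NewReader(conn.Reader(), []byte("acc/"))
	defer r.Discard()

	v, err := r.Get([]byte("alice"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("alice => %v\n", v)
}
```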
func (b *rocksDBBatch) Commit() (err error) { if b.batch == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } defer func() { err = dbutil.CombineErrors(err, b.Discard(), "Discard also failed") }() err = b.mgr.current.Write(b.mgr.opts.wo, b.batch) diff --git a/db/rocksdb/creator.go b/db/rocksdb/creator.go new file mode 100644 index 000000000000..afedb4beb8e1 --- /dev/null +++ b/db/rocksdb/creator.go @@ -0,0 +1,17 @@ +//go:build rocksdb + +package rocksdb + +import ( + "path/filepath" + + "github.com/cosmos/cosmos-sdk/db/types" +) + +func init() { + creator := func(name string, dir string) (types.Connection, error) { + dir = filepath.Join(dir, name) + return NewDB(dir) + } + types.RegisterCreator(types.RocksDBBackend, creator, false) +} diff --git a/db/rocksdb/db.go b/db/rocksdb/db.go index 3f1e259fdf81..d42bf377e7d3 100644 --- a/db/rocksdb/db.go +++ b/db/rocksdb/db.go @@ -1,5 +1,3 @@ -//go:build rocksdb_build - package rocksdb import ( @@ -11,8 +9,8 @@ import ( "sync" "sync/atomic" - "github.com/cosmos/cosmos-sdk/db" dbutil "github.com/cosmos/cosmos-sdk/db/internal" + "github.com/cosmos/cosmos-sdk/db/types" "github.com/cosmos/gorocksdb" ) @@ -22,27 +20,27 @@ var ( ) var ( - _ db.Connection = (*RocksDB)(nil) - _ db.Reader = (*dbTxn)(nil) - _ db.Writer = (*dbWriter)(nil) - _ db.ReadWriter = (*dbWriter)(nil) + _ types.Connection = (*RocksDB)(nil) + _ types.Reader = (*dbTxn)(nil) + _ types.Writer = (*dbWriter)(nil) + _ types.ReadWriter = (*dbWriter)(nil) ) // RocksDB is a connection to a RocksDB key-value database. type RocksDB = dbManager type dbManager struct { - current *Connection + current *dbConnection dir string opts dbOptions - vmgr *db.VersionManager + vmgr *types.VersionManager mtx sync.RWMutex - // Track open DBWriters + // Track open Writers openWriters int32 cpCache checkpointCache } -type Connection = gorocksdb.OptimisticTransactionDB +type dbConnection = gorocksdb.OptimisticTransactionDB type checkpointCache struct { cache map[uint64]*cpCacheEntry @@ -50,7 +48,7 @@ type checkpointCache struct { } type cpCacheEntry struct { - cxn *Connection + cxn *dbConnection openCount uint } @@ -129,7 +127,7 @@ func (mgr *dbManager) checkpointsDir() string { } // Reads directory for checkpoints files -func readVersions(dir string) (*db.VersionManager, error) { +func readVersions(dir string) (*types.VersionManager, error) { files, err := os.ReadDir(dir) if err != nil { return nil, err @@ -142,23 +140,23 @@ func readVersions(dir string) (*db.VersionManager, error) { } versions = append(versions, version) } - return db.NewVersionManager(versions), nil + return types.NewVersionManager(versions), nil } func (mgr *dbManager) checkpointPath(version uint64) (string, error) { dbPath := filepath.Join(mgr.checkpointsDir(), fmt.Sprintf(checkpointFileFormat, version)) if stat, err := os.Stat(dbPath); err != nil { if errors.Is(err, os.ErrNotExist) { - err = db.ErrVersionDoesNotExist + err = types.ErrVersionDoesNotExist } return "", err } else if !stat.IsDir() { - return "", db.ErrVersionDoesNotExist + return "", types.ErrVersionDoesNotExist } return dbPath, nil } -func (mgr *dbManager) openCheckpoint(version uint64) (*Connection, error) { +func (mgr *dbManager) openCheckpoint(version uint64) (*dbConnection, error) { mgr.cpCache.mtx.Lock() defer mgr.cpCache.mtx.Unlock() cp, has := mgr.cpCache.cache[version] @@ -178,7 +176,7 @@ func (mgr *dbManager) openCheckpoint(version uint64) (*Connection, error) { return db, nil } -func (mgr *dbManager) Reader() db.Reader { +func (mgr 
*dbManager) Reader() types.Reader { mgr.mtx.RLock() defer mgr.mtx.RUnlock() return &dbTxn{ @@ -189,7 +187,7 @@ func (mgr *dbManager) Reader() db.Reader { } } -func (mgr *dbManager) ReaderAt(version uint64) (db.Reader, error) { +func (mgr *dbManager) ReaderAt(version uint64) (types.Reader, error) { mgr.mtx.RLock() defer mgr.mtx.RUnlock() d, err := mgr.openCheckpoint(version) @@ -204,7 +202,7 @@ func (mgr *dbManager) ReaderAt(version uint64) (db.Reader, error) { }, nil } -func (mgr *dbManager) ReadWriter() db.ReadWriter { +func (mgr *dbManager) ReadWriter() types.ReadWriter { mgr.mtx.RLock() defer mgr.mtx.RUnlock() atomic.AddInt32(&mgr.openWriters, 1) @@ -214,14 +212,14 @@ func (mgr *dbManager) ReadWriter() db.ReadWriter { }} } -func (mgr *dbManager) Writer() db.Writer { +func (mgr *dbManager) Writer() types.Writer { mgr.mtx.RLock() defer mgr.mtx.RUnlock() atomic.AddInt32(&mgr.openWriters, 1) return mgr.newRocksDBBatch() } -func (mgr *dbManager) Versions() (db.VersionSet, error) { +func (mgr *dbManager) Versions() (types.VersionSet, error) { mgr.mtx.RLock() defer mgr.mtx.RUnlock() return mgr.vmgr, nil @@ -235,7 +233,7 @@ func (mgr *dbManager) SaveNextVersion() (uint64, error) { // SaveVersion implements Connection. func (mgr *dbManager) SaveVersion(target uint64) error { if target == 0 { - return db.ErrInvalidVersion + return types.ErrInvalidVersion } _, err := mgr.save(target) return err @@ -245,7 +243,7 @@ func (mgr *dbManager) save(target uint64) (uint64, error) { mgr.mtx.Lock() defer mgr.mtx.Unlock() if mgr.openWriters > 0 { - return 0, db.ErrOpenTransactions + return 0, types.ErrOpenTransactions } newVmgr := mgr.vmgr.Copy() target, err := newVmgr.Save(target) @@ -267,7 +265,7 @@ func (mgr *dbManager) save(target uint64) (uint64, error) { func (mgr *dbManager) DeleteVersion(ver uint64) error { if mgr.cpCache.has(ver) { - return db.ErrOpenTransactions + return types.ErrOpenTransactions } mgr.mtx.Lock() defer mgr.mtx.Unlock() @@ -284,8 +282,29 @@ func (mgr *dbManager) Revert() (err error) { mgr.mtx.RLock() defer mgr.mtx.RUnlock() if mgr.openWriters > 0 { - return db.ErrOpenTransactions + return types.ErrOpenTransactions + } + return mgr.revert(mgr.vmgr.Last()) +} + +func (mgr *dbManager) RevertTo(target uint64) (err error) { + mgr.mtx.RLock() + defer mgr.mtx.RUnlock() + if mgr.openWriters > 0 { + return types.ErrOpenTransactions } + if !mgr.vmgr.Exists(target) { + return types.ErrVersionDoesNotExist + } + err = mgr.revert(target) + if err != nil { + return + } + mgr.vmgr.DeleteAbove(target) + return +} + +func (mgr *dbManager) revert(target uint64) (err error) { // Close current connection and replace it with a checkpoint (created from the last checkpoint) mgr.current.Close() dbPath := filepath.Join(mgr.dir, currentDBFileName) @@ -293,8 +312,8 @@ func (mgr *dbManager) Revert() (err error) { if err != nil { return } - if last := mgr.vmgr.Last(); last != 0 { - err = mgr.restoreFromCheckpoint(last, dbPath) + if target != 0 { // when target is 0, restore no checkpoints + err = mgr.restoreFromCheckpoint(target, dbPath) if err != nil { return } @@ -341,10 +360,10 @@ func (mgr *dbManager) Stats() map[string]string { // Get implements Reader. 
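Each backend now implements the same `RevertTo(version)` behaviour: restore the live contents to a previously saved version and drop every newer version from the version set. The dbtest cases above call it directly on a `types.Connection`, so it is assumed here to be part of that interface. A hedged sketch of a caller:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/types"
)

// rollback reverts conn to a saved version. Error values follow the backends
// above: ErrVersionDoesNotExist for an unsaved target, ErrOpenTransactions
// while writers are still open.
func rollback(conn types.Connection, target uint64) error {
	if err := conn.RevertTo(target); err != nil {
		return fmt.Errorf("revert to %d: %w", target, err)
	}
	// Versions above target have been removed from the version set.
	vs, err := conn.Versions()
	if err != nil {
		return err
	}
	fmt.Printf("last version is now %d\n", vs.Last())
	return nil
}
```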
func (tx *dbTxn) Get(key []byte) ([]byte, error) { if tx.txn == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if len(key) == 0 { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } res, err := tx.txn.Get(tx.mgr.opts.ro, key) if err != nil { @@ -356,10 +375,10 @@ func (tx *dbTxn) Get(key []byte) ([]byte, error) { // Get implements Reader. func (tx *dbWriter) Get(key []byte) ([]byte, error) { if tx.txn == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if len(key) == 0 { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } res, err := tx.txn.GetForUpdate(tx.mgr.opts.ro, key) if err != nil { @@ -368,7 +387,7 @@ func (tx *dbWriter) Get(key []byte) ([]byte, error) { return moveSliceToBytes(res), nil } -// Has implements DBReader. +// Has implements Reader. func (tx *dbTxn) Has(key []byte) (bool, error) { bytes, err := tx.Get(key) if err != nil { @@ -377,10 +396,10 @@ func (tx *dbTxn) Has(key []byte) (bool, error) { return bytes != nil, nil } -// Set implements DBWriter. +// Set implements Writer. func (tx *dbWriter) Set(key []byte, value []byte) error { if tx.txn == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } if err := dbutil.ValidateKv(key, value); err != nil { return err @@ -388,20 +407,20 @@ func (tx *dbWriter) Set(key []byte, value []byte) error { return tx.txn.Put(key, value) } -// Delete implements DBWriter. +// Delete implements Writer. func (tx *dbWriter) Delete(key []byte) error { if tx.txn == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } if len(key) == 0 { - return db.ErrKeyEmpty + return types.ErrKeyEmpty } return tx.txn.Delete(key) } func (tx *dbWriter) Commit() (err error) { if tx.txn == nil { - return db.ErrTransactionClosed + return types.ErrTransactionClosed } defer func() { err = dbutil.CombineErrors(err, tx.Discard(), "Discard also failed") }() err = tx.txn.Commit() @@ -429,25 +448,25 @@ func (tx *dbWriter) Discard() error { return tx.dbTxn.Discard() } -// Iterator implements DBReader. -func (tx *dbTxn) Iterator(start, end []byte) (db.Iterator, error) { +// Iterator implements Reader. +func (tx *dbTxn) Iterator(start, end []byte) (types.Iterator, error) { if tx.txn == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } itr := tx.txn.NewIterator(tx.mgr.opts.ro) return newRocksDBIterator(itr, start, end, false), nil } -// ReverseIterator implements DBReader. -func (tx *dbTxn) ReverseIterator(start, end []byte) (db.Iterator, error) { +// ReverseIterator implements Reader. 
+func (tx *dbTxn) ReverseIterator(start, end []byte) (types.Iterator, error) { if tx.txn == nil { - return nil, db.ErrTransactionClosed + return nil, types.ErrTransactionClosed } if (start != nil && len(start) == 0) || (end != nil && len(end) == 0) { - return nil, db.ErrKeyEmpty + return nil, types.ErrKeyEmpty } itr := tx.txn.NewIterator(tx.mgr.opts.ro) return newRocksDBIterator(itr, start, end, true), nil diff --git a/db/rocksdb/db_test.go b/db/rocksdb/db_test.go index 69065c462e6a..0d67b55e6afc 100644 --- a/db/rocksdb/db_test.go +++ b/db/rocksdb/db_test.go @@ -1,5 +1,3 @@ -//go:build rocksdb_build - package rocksdb import ( @@ -9,11 +7,11 @@ import ( "github.com/stretchr/testify/require" - "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/db/dbtest" + "github.com/cosmos/cosmos-sdk/db/types" ) -func load(t *testing.T, dir string) db.Connection { +func load(t *testing.T, dir string) types.Connection { d, err := NewDB(dir) require.NoError(t, err) return d diff --git a/db/rocksdb/iterator.go b/db/rocksdb/iterator.go index e760c7507ed5..231d7dfbedb5 100644 --- a/db/rocksdb/iterator.go +++ b/db/rocksdb/iterator.go @@ -1,11 +1,9 @@ -//go:build rocksdb_build - package rocksdb import ( "bytes" - "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/types" "github.com/cosmos/gorocksdb" ) @@ -18,7 +16,7 @@ type rocksDBIterator struct { primed bool } -var _ db.Iterator = (*rocksDBIterator)(nil) +var _ types.Iterator = (*rocksDBIterator)(nil) func newRocksDBIterator(source *gorocksdb.Iterator, start, end []byte, isReverse bool) *rocksDBIterator { if isReverse { diff --git a/db/adapter.go b/db/types/adapter.go similarity index 93% rename from db/adapter.go rename to db/types/adapter.go index 1988e74af9d6..6c72bdb69eb0 100644 --- a/db/adapter.go +++ b/db/types/adapter.go @@ -1,4 +1,4 @@ -package db +package types type readerRWAdapter struct{ Reader } @@ -18,6 +18,5 @@ func (readerRWAdapter) Delete([]byte) error { } func (rw readerRWAdapter) Commit() error { - rw.Discard() - return nil + return rw.Discard() } diff --git a/db/types/creator.go b/db/types/creator.go new file mode 100644 index 000000000000..7492465bebae --- /dev/null +++ b/db/types/creator.go @@ -0,0 +1,55 @@ +package types + +import ( + "fmt" + "strings" +) + +type BackendType string + +// These are valid backend types. +const ( + // MemDBBackend represents in-memory key value store, which is mostly used + // for testing. + MemDBBackend BackendType = "memdb" + // RocksDBBackend represents rocksdb (uses github.com/cosmos/gorocksdb) + // - EXPERIMENTAL + // - requires gcc + // - use rocksdb build tag (go build -tags rocksdb) + RocksDBBackend BackendType = "rocksdb" + // BadgerDBBackend represents BadgerDB + // - pure Go + // - requires badgerdb build tag + BadgerDBBackend BackendType = "badgerdb" +) + +type DBCreator func(name string, dir string) (Connection, error) + +var backends = map[BackendType]DBCreator{} + +func RegisterCreator(backend BackendType, creator DBCreator, force bool) { + _, ok := backends[backend] + if !force && ok { + return + } + backends[backend] = creator +} + +// NewDB creates a new database of type backend with the given name. 
+func NewDB(name string, backend BackendType, dir string) (Connection, error) { + creator, ok := backends[backend] + if !ok { + keys := make([]string, 0, len(backends)) + for k := range backends { + keys = append(keys, string(k)) + } + return nil, fmt.Errorf("unknown App DB backend %s, expected one of %v", + backend, strings.Join(keys, ",")) + } + + db, err := creator(name, dir) + if err != nil { + return nil, fmt.Errorf("failed to initialize database: %w", err) + } + return db, nil +} diff --git a/db/types.go b/db/types/types.go similarity index 91% rename from db/types.go rename to db/types/types.go index a800672bebfe..39e48eab0ed6 100644 --- a/db/types.go +++ b/db/types/types.go @@ -1,4 +1,4 @@ -package db +package types import "errors" @@ -49,27 +49,30 @@ type Connection interface { // SaveNextVersion saves the current contents of the database and returns the next version ID, // which will be `Versions().Last()+1`. - // Returns an error if any open DBWriter transactions exist. - // TODO: rename to something more descriptive? + // Returns an error if any open Writer transactions exist. SaveNextVersion() (uint64, error) // SaveVersion attempts to save database at a specific version ID, which must be greater than or // equal to what would be returned by `SaveNextVersion`. - // Returns an error if any open DBWriter transactions exist. + // Returns an error if any open Writer transactions exist. SaveVersion(uint64) error // DeleteVersion deletes a saved version. Returns ErrVersionDoesNotExist for invalid versions. DeleteVersion(uint64) error // Revert reverts the DB state to the last saved version; if none exist, this clears the DB. - // Returns an error if any open DBWriter transactions exist. + // Returns an error if any open Writer transactions exist. Revert() error + // RevertTo reverts the DB state to the given version. Returns ErrVersionDoesNotExist for invalid versions. + // Returns an error if any open Writer transactions exist. + RevertTo(uint64) error + // Close closes the database connection. Close() error } -// DBReader is a read-only transaction interface. It is safe for concurrent access. +// Reader is a read-only transaction interface. It is safe for concurrent access. // Callers must call Discard when done with the transaction. // // Keys cannot be nil or empty, while values cannot be nil. Keys and values should be considered @@ -104,7 +107,7 @@ type Reader interface { Discard() error } -// DBWriter is a write-only transaction interface. +// Writer is a write-only transaction interface. // It is safe for concurrent writes, following an optimistic (OCC) strategy, detecting any write // conflicts and returning an error on commit, rather than locking the DB. // Callers must call Commit or Discard when done with the transaction. @@ -126,7 +129,7 @@ type Writer interface { Discard() error } -// DBReadWriter is a transaction interface that allows both reading and writing. +// ReadWriter is a transaction interface that allows both reading and writing. type ReadWriter interface { Reader Writer @@ -141,7 +144,7 @@ type ReadWriter interface { // Note that the iterator is invalid on construction: Next() must be called to initialize it to its // starting position. // -// As with DBReader, keys and values should be considered read-only, and must be copied before they are +// As with Reader, keys and values should be considered read-only, and must be copied before they are // modified. 
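Editor's note: the rocksdb `init` earlier in this diff registers its creator only when the `rocksdb` build tag is set, and `types.NewDB` resolves the requested backend from that registry. A small sketch of registering and opening a backend; the `testdb` name and the memdb-backed creator are illustrative only, and this assumes the in-memory backend satisfies `types.Connection` as its use elsewhere in this diff suggests:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/db/types"
)

// A hypothetical backend name used only for this illustration.
const testBackend types.BackendType = "testdb"

func init() {
	// Creators receive the database name and base directory; this one ignores
	// both and hands back an in-memory connection.
	types.RegisterCreator(testBackend, func(name, dir string) (types.Connection, error) {
		return memdb.NewDB(), nil
	}, false)
}

func main() {
	// NewDB looks the backend up in the registry; an unregistered backend
	// returns an error listing the known keys.
	conn, err := types.NewDB("application", testBackend, "/tmp/ignored")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	fmt.Println("opened backend:", testBackend)
}
```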
// // Typical usage: diff --git a/db/types/version_manager.go b/db/types/version_manager.go new file mode 100644 index 000000000000..01612ab8b998 --- /dev/null +++ b/db/types/version_manager.go @@ -0,0 +1,136 @@ +package types + +import ( + "fmt" + "sort" +) + +// VersionManager encapsulates the current valid versions of a DB and computes +// the next version. +type VersionManager struct { + versions []uint64 +} + +var _ VersionSet = (*VersionManager)(nil) + +// NewVersionManager creates a VersionManager from a slice of version ids. +func NewVersionManager(versions []uint64) *VersionManager { + vs := make([]uint64, len(versions)) + copy(vs, versions) + sort.Slice(vs, func(i, j int) bool { return vs[i] < vs[j] }) + return &VersionManager{vs} +} + +// Exists implements VersionSet. +func (vm *VersionManager) Exists(version uint64) bool { + _, has := binarySearch(vm.versions, version) + return has +} + +// Last implements VersionSet. +func (vm *VersionManager) Last() uint64 { + if len(vm.versions) == 0 { + return 0 + } + return vm.versions[len(vm.versions)-1] +} + +func (vm *VersionManager) Initial() uint64 { + if len(vm.versions) == 0 { + return 0 + } + return vm.versions[0] +} + +func (vm *VersionManager) Save(target uint64) (uint64, error) { + next := vm.Last() + 1 + if target == 0 { + target = next + } else if target < next { + return 0, fmt.Errorf( + "target version cannot be less than next sequential version (%v < %v)", target, next) + } + if vm.Exists(target) { + return 0, fmt.Errorf("version exists: %v", target) + } + vm.versions = append(vm.versions, target) + return target, nil +} + +func (vm *VersionManager) Delete(target uint64) { + i, has := binarySearch(vm.versions, target) + if !has { + return + } + vm.versions = append(vm.versions[:i], vm.versions[i+1:]...) +} + +func (vm *VersionManager) DeleteAbove(target uint64) { + var iFrom *int + for i, v := range vm.versions { + if iFrom == nil && v > target { + iFrom = new(int) + *iFrom = i + } + } + if iFrom != nil { + vm.versions = vm.versions[:*iFrom] + } +} + +type vmIterator struct { + vmgr *VersionManager + i int +} + +func (vi *vmIterator) Next() bool { + vi.i++ + return vi.i < len(vi.vmgr.versions) +} +func (vi *vmIterator) Value() uint64 { return vi.vmgr.versions[vi.i] } + +// Iterator implements VersionSet. +func (vm *VersionManager) Iterator() VersionIterator { + return &vmIterator{vm, -1} +} + +// Count implements VersionSet. +func (vm *VersionManager) Count() int { return len(vm.versions) } + +// Equal implements VersionSet. 
+func (vm *VersionManager) Equal(that VersionSet) bool { + if vm.Count() != that.Count() { + return false + } + for i, it := 0, that.Iterator(); it.Next(); { + if vm.versions[i] != it.Value() { + return false + } + i++ + } + return true +} + +func (vm *VersionManager) Copy() *VersionManager { + vs := make([]uint64, len(vm.versions)) + copy(vs, vm.versions) + return &VersionManager{vs} +} + +// Returns closest index and whether it's a match +func binarySearch(hay []uint64, ndl uint64) (int, bool) { + var mid int + from, to := 0, len(hay)-1 + for from <= to { + mid = (from + to) / 2 + switch { + case hay[mid] < ndl: + from = mid + 1 + case hay[mid] > ndl: + to = mid - 1 + default: + return mid, true + } + } + return from, false +} diff --git a/db/types/version_manager_test.go b/db/types/version_manager_test.go new file mode 100644 index 000000000000..75799a665e56 --- /dev/null +++ b/db/types/version_manager_test.go @@ -0,0 +1,13 @@ +package types_test + +import ( + "testing" + + "github.com/cosmos/cosmos-sdk/db/dbtest" + "github.com/cosmos/cosmos-sdk/db/types" +) + +func TestVersionManager(t *testing.T) { + new := func(vs []uint64) types.VersionSet { return types.NewVersionManager(vs) } + dbtest.DoTestVersionSet(t, new) +} diff --git a/db/version_manager.go b/db/version_manager.go deleted file mode 100644 index b8a2a6b055b5..000000000000 --- a/db/version_manager.go +++ /dev/null @@ -1,132 +0,0 @@ -package db - -import ( - "fmt" -) - -// VersionManager encapsulates the current valid versions of a DB and computes -// the next version. -type VersionManager struct { - versions map[uint64]struct{} - initial, last uint64 -} - -var _ VersionSet = (*VersionManager)(nil) - -// NewVersionManager creates a VersionManager from a slice of version ids. -func NewVersionManager(versions []uint64) *VersionManager { - vmap := make(map[uint64]struct{}) - var init, last uint64 - for _, ver := range versions { - vmap[ver] = struct{}{} - if init == 0 || ver < init { - init = ver - } - if ver > last { - last = ver - } - } - return &VersionManager{versions: vmap, initial: init, last: last} -} - -// Exists implements VersionSet. -func (vm *VersionManager) Exists(version uint64) bool { - _, has := vm.versions[version] - return has -} - -// Last implements VersionSet. 
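Editor's note: the rewritten `VersionManager` keeps a sorted slice and binary-searches it instead of the old map plus `initial`/`last` bookkeeping (deleted below), which also makes `Iterator` deterministic. A quick sketch of the behaviour, with expected values in comments:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/types"
)

func main() {
	// Input order does not matter: the constructor copies and sorts the IDs.
	vm := types.NewVersionManager([]uint64{5, 2, 9})

	fmt.Println(vm.Initial(), vm.Last(), vm.Exists(2)) // 2 9 true

	// Save with target 0 allocates Last()+1; explicit targets must be >= that.
	next, _ := vm.Save(0)
	fmt.Println(next) // 10

	// DeleteAbove truncates everything after the target, as RevertTo does.
	vm.DeleteAbove(5)

	// Iteration now follows ascending version order; the old map-based
	// implementation iterated in arbitrary order via a goroutine and channel.
	for it := vm.Iterator(); it.Next(); {
		fmt.Println(it.Value()) // 2, then 5
	}
}
```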
-func (vm *VersionManager) Last() uint64 { - return vm.last -} - -func (vm *VersionManager) Initial() uint64 { - return vm.initial -} - -func (vm *VersionManager) Save(target uint64) (uint64, error) { - next := vm.Last() + 1 - if target == 0 { - target = next - } else if target < next { - return 0, fmt.Errorf( - "target version cannot be less than next sequential version (%v < %v)", target, next) - } - if _, has := vm.versions[target]; has { - return 0, fmt.Errorf("version exists: %v", target) - } - - vm.versions[target] = struct{}{} - vm.last = target - if len(vm.versions) == 1 { - vm.initial = target - } - return target, nil -} - -func findLimit(m map[uint64]struct{}, cmp func(uint64, uint64) bool, init uint64) uint64 { - for x := range m { - if cmp(x, init) { - init = x - } - } - return init -} - -func (vm *VersionManager) Delete(target uint64) { - delete(vm.versions, target) - if target == vm.last { - vm.last = findLimit(vm.versions, func(x, max uint64) bool { return x > max }, 0) - } - if target == vm.initial { - vm.initial = findLimit(vm.versions, func(x, min uint64) bool { return x < min }, vm.last) - } -} - -type vmIterator struct { - ch <-chan uint64 - open bool - buf uint64 -} - -func (vi *vmIterator) Next() bool { - vi.buf, vi.open = <-vi.ch - return vi.open -} -func (vi *vmIterator) Value() uint64 { return vi.buf } - -// Iterator implements VersionSet. -func (vm *VersionManager) Iterator() VersionIterator { - ch := make(chan uint64) - go func() { - for ver := range vm.versions { - ch <- ver - } - close(ch) - }() - return &vmIterator{ch: ch} -} - -// Count implements VersionSet. -func (vm *VersionManager) Count() int { return len(vm.versions) } - -// Equal implements VersionSet. -func (vm *VersionManager) Equal(that VersionSet) bool { - if vm.Count() != that.Count() { - return false - } - for it := that.Iterator(); it.Next(); { - if !vm.Exists(it.Value()) { - return false - } - } - return true -} - -func (vm *VersionManager) Copy() *VersionManager { - vmap := make(map[uint64]struct{}) - for ver := range vm.versions { - vmap[ver] = struct{}{} - } - return &VersionManager{versions: vmap, initial: vm.initial, last: vm.last} -} diff --git a/docs/core/store.md b/docs/core/store.md index 5852f3a89c38..49714784e1b0 100644 --- a/docs/core/store.md +++ b/docs/core/store.md @@ -234,9 +234,9 @@ When `KVStore.Set` or `KVStore.Delete` methods are called, `listenkv.Store` auto ## New Store package (`store/v2alpha1`) -The SDK is in the process of transitioning to use the types listed here as the default interface for state storage. At the time of writing, these cannot be used within an application and are not directly compatible with the `CommitMultiStore` and related types. +The SDK is in the process of transitioning to use the types in this package as the default interface for state storage. Note that these types are not all directly compatible with the types in `store/types`. -These types use the new `db` sub-module of Cosmos-SDK (`github.com/cosmos/cosmos-sdk/db`), rather than `tmdb` (`github.com/tendermint/tm-db`). +This package uses the new `db` sub-module of Cosmos-SDK (`github.com/cosmos/cosmos-sdk/db`), rather than `tmdb` (`github.com/tendermint/tm-db`). See [ADR-040](../architecture/adr-040-storage-and-smt-state-commitments.md) for the motivations and design specifications of the change. 
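Editor's note: since the new store package sits on `db.Connection` rather than a tm-db `dbm.DB`, the application database is now opened through the SDK's own `db` package, exactly as the updated `openDB` helper later in this diff does. A minimal sketch of that call, assuming the badger backend is compiled in (its registry comment above mentions a `badgerdb` build tag) and with an illustrative data directory:

```go
package main

import (
	"log"
	"path/filepath"

	dbm "github.com/cosmos/cosmos-sdk/db"
)

func main() {
	// Mirrors server.openDB: one "application" connection under <home>/data,
	// using the new default backend instead of tm-db's goleveldb.
	dataDir := filepath.Join("/tmp/example-home", "data")
	conn, err := dbm.NewDB("application", dbm.BadgerDBBackend, dataDir)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```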
@@ -256,13 +256,13 @@ This is the new interface (or, set of interfaces) for the main client store, rep This is the main interface for persisent application state, analogous to the original `CommitMultiStore`. -* Past version views are accessed with `GetVersion`, which returns a `BasicMultiStore`. -* Substores are accessed with `GetKVStore`. Trying to get a substore that was not defined at initialization will cause a panic. -* `Close` must be called to release the DB resources being used by the store. + * Past version views are accessed with `GetVersion`, which returns a `BasicMultiStore`. + * Substores are accessed with `GetKVStore`. Trying to get a substore that was not defined at initialization will cause a panic. + * `Close` must be called to release the DB resources being used by the store. -### `BasicMultiStore` +### `MultiStore` -A minimal interface that only allows accessing substores. Note: substores returned by `BasicMultiStore.GetKVStore` are read-only and will panic on `Set` or `Delete` calls. +A minimal interface that only allows accessing substores. Note: substores returned by `MultiStore.GetKVStore` are read-only and will panic on `Set` or `Delete` calls. ### Implementation (`root.Store`) diff --git a/docs/core/upgrade.md b/docs/core/upgrade.md index bcccd4717227..7919f846cbcc 100644 --- a/docs/core/upgrade.md +++ b/docs/core/upgrade.md @@ -75,7 +75,7 @@ You can introduce entirely new modules to the application during an upgrade. New ### Add StoreUpgrades for New Modules -All chains preparing to run in-place store migrations will need to manually add store upgrades for new modules and then configure the store loader to apply those upgrades. This ensures that the new module's stores are added to the multistore before the migrations begin. +All chains preparing to run in-place store migrations will need to manually add store upgrades for new modules and then configure the store to apply those upgrades. This ensures that the new module's stores are added to the multistore before the migrations begin. ```go upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk() @@ -91,8 +91,8 @@ if upgradeInfo.Name == "my-plan" && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo. // ... 
} - // configure store loader that checks if version == upgradeHeight and applies store upgrades - app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades)) + // configure store option that checks if version == upgradeHeight and applies store upgrades + app.SetStoreOption(upgradetypes.UpgradeStoreOption(upgradeInfo.Height, &storeUpgrades)) } ``` diff --git a/go.mod b/go.mod index 8757c6847540..8a6e92d49e48 100644 --- a/go.mod +++ b/go.mod @@ -108,6 +108,7 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/denis-tingaikin/go-header v0.4.3 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect + github.com/dgraph-io/badger/v3 v3.2103.2 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect github.com/dustin/go-humanize v1.0.0 // indirect @@ -146,6 +147,7 @@ require ( github.com/golangci/revgrep v0.0.0-20210930125155-c22e5001d4f2 // indirect github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect github.com/google/btree v1.0.1 // indirect + github.com/google/flatbuffers v2.0.0+incompatible // indirect github.com/google/go-cmp v0.5.8 // indirect github.com/google/orderedcode v0.0.1 // indirect github.com/googleapis/gax-go/v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index 41d13e157c13..36be613d7dcc 100644 --- a/go.sum +++ b/go.sum @@ -277,6 +277,8 @@ github.com/cosmos/cosmos-proto v1.0.0-alpha7/go.mod h1:dosO4pSAbJF8zWCzCoTWP7nNs github.com/cosmos/go-bip39 v1.0.0 h1:pcomnQdrdH22njcAatO0yWojsUnCO3y2tNoV1cb6hHY= github.com/cosmos/go-bip39 v1.0.0/go.mod h1:RNJv0H/pOIVgxw6KS7QeX2a0Uo0aKUlfhZ4xuwvCdJw= github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= +github.com/cosmos/gorocksdb v1.2.0 h1:d0l3jJG8M4hBouIZq0mDUHZ+zjOx044J3nGRskwTb4Y= +github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= github.com/cosmos/gorocksdb v1.2.0/go.mod h1:aaKvKItm514hKfNJpUJXnnOWeBnk2GL4+Qw9NHizILw= github.com/cosmos/iavl v0.19.0 h1:sgyrjqOkycXiN7Tuupuo4QAldKFg7Sipyfeg/IL7cps= github.com/cosmos/iavl v0.19.0/go.mod h1:l5h9pAB3m5fihB3pXVgwYqdY8aBsMagqz7T0MUjxZeA= @@ -319,6 +321,8 @@ github.com/desertbit/timer v0.0.0-20180107155436-c41aec40b27f/go.mod h1:xH/i4TFM github.com/dgraph-io/badger/v2 v2.2007.2/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= github.com/dgraph-io/badger/v2 v2.2007.4/go.mod h1:vSw/ax2qojzbN6eXHIx6KPKtCSHJN/Uz0X0VPruTIhk= +github.com/dgraph-io/badger/v3 v3.2103.2 h1:dpyM5eCJAtQCBcMCZcT4UBZchuTJgCywerHHgmxfxM8= +github.com/dgraph-io/badger/v3 v3.2103.2/go.mod h1:RHo4/GmYcKKh5Lxu63wLEMHJ70Pac2JqZRYGhlyAo2M= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= @@ -547,6 +551,9 @@ github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= +github.com/google/flatbuffers v1.12.1/go.mod 
h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/flatbuffers v2.0.0+incompatible h1:dicJ2oXwypfwUGnB2/TYWYEKiuk9eYQlQO/AnOHl5mI= +github.com/google/flatbuffers v2.0.0+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= diff --git a/internal/db/iterator_adapter.go b/internal/db/iterator_adapter.go index 8bf717041d57..e98b5800336a 100644 --- a/internal/db/iterator_adapter.go +++ b/internal/db/iterator_adapter.go @@ -12,7 +12,7 @@ type AsStoreIter struct { valid bool } -// DBToStoreIterator returns an iterator wrapping the given iterator so that it satisfies the +// ToStoreIterator returns an iterator wrapping the given iterator so that it satisfies the // (store/types).Iterator interface. func ToStoreIterator(source dbm.Iterator) *AsStoreIter { ret := &AsStoreIter{Iterator: source} diff --git a/internal/db/tmdb_adapter.go b/internal/db/tmdb_adapter.go new file mode 100644 index 000000000000..0bece64cf675 --- /dev/null +++ b/internal/db/tmdb_adapter.go @@ -0,0 +1,170 @@ +// Adapters used to wrap objects supporting cosmos-sdk/db interface so that they support +// the tm-db interface. +// +// This serves as a transitional step in introducing the new DB interface while maintaining +// compatibility with existing code that expects the old interface. +package db + +import ( + "errors" + + dbm "github.com/cosmos/cosmos-sdk/db/types" + + tmdb "github.com/tendermint/tm-db" +) + +// TmdbTxnAdapter adapter wraps a single ReadWriter. +// Calling *Sync methods performs a commit and closes the transaction, invalidating it. +type TmdbTxnAdapter struct { + dbm.ReadWriter +} + +// TmdbConnAdapter wraps a DBConnection and a current transaction. +// When calling a *Sync method, a commit is performed and the transaction refreshed. +type TmdbConnAdapter struct { + dbm.ReadWriter + Connection dbm.Connection +} +type tmdbBatchAdapter struct { + *TmdbConnAdapter + closed bool +} + +var ( + _ tmdb.DB = (*TmdbTxnAdapter)(nil) + _ tmdb.DB = (*TmdbConnAdapter)(nil) +) + +// ReadWriterAsTmdb returns a tmdb.DB which wraps a ReadWriter. +// Calling *Sync methods performs a commit and closes the transaction. +func ReadWriterAsTmdb(rw dbm.ReadWriter) TmdbTxnAdapter { return TmdbTxnAdapter{rw} } + +// ConnectionAsTmdb returns a tmdb.DB which wraps a DBConnection. 
+func ConnectionAsTmdb(db dbm.Connection) *TmdbConnAdapter { + return &TmdbConnAdapter{db.ReadWriter(), db} +} + +func (d TmdbTxnAdapter) DeleteSync(k []byte) error { + err := d.ReadWriter.Delete(k) + if err != nil { + return err + } + return d.Commit() +} +func (d TmdbTxnAdapter) SetSync(k, v []byte) error { + err := d.ReadWriter.Set(k, v) + if err != nil { + return err + } + return d.Commit() +} + +func (d TmdbTxnAdapter) Iterator(s, e []byte) (tmdb.Iterator, error) { + it, err := d.ReadWriter.Iterator(s, e) + if err != nil { + return nil, err + } + return ToStoreIterator(it), nil +} +func (d TmdbTxnAdapter) ReverseIterator(s, e []byte) (tmdb.Iterator, error) { + it, err := d.ReadWriter.ReverseIterator(s, e) + if err != nil { + return nil, err + } + return ToStoreIterator(it), nil +} + +func (d TmdbTxnAdapter) Close() error { return d.ReadWriter.Discard() } +func (d TmdbTxnAdapter) NewBatch() tmdb.Batch { return d } +func (d TmdbTxnAdapter) Print() error { return nil } +func (d TmdbTxnAdapter) Stats() map[string]string { return nil } + +func (d TmdbTxnAdapter) Write() error { return d.Commit() } +func (d TmdbTxnAdapter) WriteSync() error { return d.Commit() } + +// TmdbConnAdapter: + +func (d *TmdbConnAdapter) Close() error { d.CloseTx(); return d.Connection.Close() } +func (d *TmdbConnAdapter) CloseTx() error { return d.ReadWriter.Discard() } + +func (d *TmdbConnAdapter) sync() error { + err := d.ReadWriter.Commit() + if err != nil { + return err + } + d.ReadWriter = d.Connection.ReadWriter() + return nil +} +func (d *TmdbConnAdapter) DeleteSync(k []byte) error { + err := d.ReadWriter.Delete(k) + if err != nil { + return err + } + return d.sync() +} +func (d *TmdbConnAdapter) SetSync(k, v []byte) error { + err := d.ReadWriter.Set(k, v) + if err != nil { + return err + } + return d.sync() +} + +func (d *TmdbConnAdapter) Commit() (uint64, error) { + err := d.ReadWriter.Commit() + if err != nil { + return 0, err + } + v, err := d.Connection.SaveNextVersion() + if err != nil { + return 0, err + } + d.ReadWriter = d.Connection.ReadWriter() + return v, err +} + +func (d *TmdbConnAdapter) Iterator(s, e []byte) (tmdb.Iterator, error) { + it, err := d.ReadWriter.Iterator(s, e) + if err != nil { + return nil, err + } + return ToStoreIterator(it), nil +} +func (d *TmdbConnAdapter) ReverseIterator(s, e []byte) (tmdb.Iterator, error) { + it, err := d.ReadWriter.ReverseIterator(s, e) + if err != nil { + return nil, err + } + return ToStoreIterator(it), nil +} + +// NewBatch returns a tmdb.Batch which wraps a DBWriter. 
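Editor's note: the adapter above is the transitional bridge for code still written against tm-db. A sketch of wrapping a connection with `ConnectionAsTmdb`; because the package is `internal/db`, this only compiles from inside the cosmos-sdk module itself, and the in-memory backend is used here purely for illustration:

```go
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/memdb"
	internaldb "github.com/cosmos/cosmos-sdk/internal/db" // internal: SDK-module use only
)

func main() {
	conn := memdb.NewDB()

	// The adapter exposes the connection through the legacy tm-db interface.
	legacy := internaldb.ConnectionAsTmdb(conn)
	defer legacy.Close()

	// SetSync writes through the current transaction, commits it, and opens a
	// fresh one, so callers written against tm-db keep working unchanged.
	if err := legacy.SetSync([]byte("k"), []byte("v")); err != nil {
		panic(err)
	}

	// Get is promoted from the embedded ReadWriter.
	val, err := legacy.Get([]byte("k"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // "v"
}
```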
+func (d *TmdbConnAdapter) NewBatch() tmdb.Batch { + return &tmdbBatchAdapter{d, false} +} +func (d *TmdbConnAdapter) Print() error { return nil } +func (d *TmdbConnAdapter) Stats() map[string]string { return nil } + +var errClosed = errors.New("batch is closed") + +func (d *tmdbBatchAdapter) Set(k, v []byte) error { + if d.closed { + return errClosed + } + return d.TmdbConnAdapter.Set(k, v) +} +func (d *tmdbBatchAdapter) Delete(k []byte) error { + if d.closed { + return errClosed + } + return d.TmdbConnAdapter.Delete(k) +} +func (d *tmdbBatchAdapter) WriteSync() error { + if d.closed { + return errClosed + } + d.closed = true + return d.sync() +} +func (d *tmdbBatchAdapter) Write() error { return d.WriteSync() } +func (d *tmdbBatchAdapter) Close() error { d.closed = true; return nil } diff --git a/pruning/manager.go b/pruning/manager.go index ac1172ebb86e..365d3cd3572f 100644 --- a/pruning/manager.go +++ b/pruning/manager.go @@ -6,7 +6,6 @@ import ( "fmt" "sync" - "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/pruning/types" @@ -16,8 +15,6 @@ import ( // determining when to prune old heights of the store // based on the strategy described by the pruning options. type Manager struct { - db dbm.DB - logger log.Logger opts types.PruningOptions snapshotInterval uint64 // Although pruneHeights happen in the same goroutine with the normal execution, @@ -49,14 +46,12 @@ var ( pruneSnapshotHeightsKey = []byte("s/prunesnapshotheights") ) -// NewManager returns a new Manager with the given db and logger. +// NewManager returns a new Manager with the given db. // The retuned manager uses a pruning strategy of "nothing" which // keeps all heights. Users of the Manager may change the strategy // by calling SetOptions. -func NewManager(db dbm.DB, logger log.Logger) *Manager { +func NewManager() *Manager { return &Manager{ - db: db, - logger: logger, opts: types.NewPruningOptions(types.PruningNothing), pruneHeights: []int64{}, pruneSnapshotHeights: list.New(), @@ -75,7 +70,7 @@ func (m *Manager) GetOptions() types.PruningOptions { // GetFlushAndResetPruningHeights returns all heights to be pruned during the next call to Prune(). // It also flushes and resets the pruning heights. -func (m *Manager) GetFlushAndResetPruningHeights() ([]int64, error) { +func (m *Manager) GetFlushAndResetPruningHeights(db dbm.DB) ([]int64, error) { if m.opts.GetPruningStrategy() == types.PruningNothing { return []int64{}, nil } @@ -83,7 +78,7 @@ func (m *Manager) GetFlushAndResetPruningHeights() ([]int64, error) { defer m.pruneHeightsMx.Unlock() // flush the updates to disk so that it is not lost if crash happens. - if err := m.db.SetSync(pruneHeightsKey, int64SliceToBytes(m.pruneHeights)); err != nil { + if err := db.SetSync(pruneHeightsKey, int64SliceToBytes(m.pruneHeights)); err != nil { return nil, err } @@ -99,7 +94,7 @@ func (m *Manager) GetFlushAndResetPruningHeights() ([]int64, error) { // the pruning strategy. Returns previousHeight, if it was kept to be pruned at the next call to Prune(), 0 otherwise. // previousHeight must be greater than 0 for the handling to take effect since valid heights start at 1 and 0 represents // the latest height. The latest height cannot be pruned. As a result, if previousHeight is less than or equal to 0, 0 is returned. 
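Editor's note: the pruning `Manager` no longer owns a DB or logger; the tm-db handle is now passed to the methods that need to flush state, as the signature changes below show. A minimal sketch of the new call pattern (option values are illustrative):

```go
package main

import (
	"fmt"

	tmdb "github.com/tendermint/tm-db"

	"github.com/cosmos/cosmos-sdk/pruning"
	pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types"
)

func main() {
	m := pruning.NewManager()
	// keep-recent = 10, prune interval = 100
	m.SetOptions(pruningtypes.NewCustomPruningOptions(10, 100))

	db := tmdb.NewMemDB()
	for h := int64(1); h <= 500; h++ {
		// Record the previous height as a pruning candidate; the DB is passed
		// in so the candidate list can be flushed to disk.
		m.HandleHeight(h-1, db)
		if m.ShouldPruneAtHeight(h) {
			heights, err := m.GetFlushAndResetPruningHeights(db)
			if err != nil {
				panic(err)
			}
			fmt.Println("prune at", h, "heights:", heights)
		}
	}
}
```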
-func (m *Manager) HandleHeight(previousHeight int64) int64 { +func (m *Manager) HandleHeight(previousHeight int64, db dbm.DB) int64 { if m.opts.GetPruningStrategy() == types.PruningNothing || previousHeight <= 0 { return 0 } @@ -128,7 +123,7 @@ func (m *Manager) HandleHeight(previousHeight int64) int64 { } // flush the updates to disk so that they are not lost if crash happens. - if err := m.db.SetSync(pruneHeightsKey, int64SliceToBytes(m.pruneHeights)); err != nil { + if err := db.SetSync(pruneHeightsKey, int64SliceToBytes(m.pruneHeights)); err != nil { panic(err) } }() @@ -155,7 +150,7 @@ func (m *Manager) HandleHeight(previousHeight int64) int64 { // height defined by the pruning strategy. Flushes the update to disk and panics if the flush fails // The input height must be greater than 0 and pruning strategy any but pruning nothing. // If one of these conditions is not met, this function does nothing. -func (m *Manager) HandleHeightSnapshot(height int64) { +func (m *Manager) HandleHeightSnapshot(height int64, db dbm.DB) { if m.opts.GetPruningStrategy() == types.PruningNothing || height <= 0 { return } @@ -163,11 +158,10 @@ func (m *Manager) HandleHeightSnapshot(height int64) { m.pruneSnapshotHeightsMx.Lock() defer m.pruneSnapshotHeightsMx.Unlock() - m.logger.Debug("HandleHeightSnapshot", "height", height) m.pruneSnapshotHeights.PushBack(height) // flush the updates to disk so that they are not lost if crash happens. - if err := m.db.SetSync(pruneSnapshotHeightsKey, listToBytes(m.pruneSnapshotHeights)); err != nil { + if err := db.SetSync(pruneSnapshotHeightsKey, listToBytes(m.pruneSnapshotHeights)); err != nil { panic(err) } } diff --git a/pruning/manager_test.go b/pruning/manager_test.go index 85d38e8c8af7..d9c59e05437e 100644 --- a/pruning/manager_test.go +++ b/pruning/manager_test.go @@ -8,7 +8,6 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/libs/log" db "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/pruning" @@ -19,10 +18,11 @@ import ( const dbErr = "db error" func TestNewManager(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + data := db.NewMemDB() + manager := pruning.NewManager() require.NotNil(t, manager) - heights, err := manager.GetFlushAndResetPruningHeights() + heights, err := manager.GetFlushAndResetPruningHeights(data) require.NoError(t, err) require.NotNil(t, heights) require.Equal(t, types.PruningNothing, manager.GetOptions().GetPruningStrategy()) @@ -79,7 +79,8 @@ func TestStrategies(t *testing.T) { }, } - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + data := db.NewMemDB() + manager := pruning.NewManager() require.NotNil(t, manager) @@ -113,10 +114,10 @@ func TestStrategies(t *testing.T) { curInterval := curStrategy.Interval for curHeight := int64(0); curHeight < 110000; curHeight++ { - handleHeightActual := manager.HandleHeight(curHeight) + handleHeightActual := manager.HandleHeight(curHeight, data) shouldPruneAtHeightActual := manager.ShouldPruneAtHeight(curHeight) - curPruningHeihts, err := manager.GetFlushAndResetPruningHeights() + curPruningHeihts, err := manager.GetFlushAndResetPruningHeights(data) require.Nil(t, err) curHeightStr := fmt.Sprintf("height: %d", curHeight) @@ -126,7 +127,7 @@ func TestStrategies(t *testing.T) { require.Equal(t, int64(0), handleHeightActual, curHeightStr) require.False(t, shouldPruneAtHeightActual, curHeightStr) - heights, err := manager.GetFlushAndResetPruningHeights() + heights, err := 
manager.GetFlushAndResetPruningHeights(data) require.NoError(t, err) require.Equal(t, 0, len(heights)) default: @@ -138,13 +139,13 @@ func TestStrategies(t *testing.T) { } else { require.Equal(t, int64(0), handleHeightActual, curHeightStr) - heights, err := manager.GetFlushAndResetPruningHeights() + heights, err := manager.GetFlushAndResetPruningHeights(data) require.NoError(t, err) require.Equal(t, 0, len(heights)) } require.Equal(t, curHeight%int64(curInterval) == 0, shouldPruneAtHeightActual, curHeightStr) } - heights, err := manager.GetFlushAndResetPruningHeights() + heights, err := manager.GetFlushAndResetPruningHeights(data) require.NoError(t, err) require.Equal(t, 0, len(heights)) } @@ -195,14 +196,15 @@ func TestHandleHeight_Inputs(t *testing.T) { for name, tc := range testcases { t.Run(name, func(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + data := db.NewMemDB() + manager := pruning.NewManager() require.NotNil(t, manager) manager.SetOptions(types.NewPruningOptions(tc.strategy)) - handleHeightActual := manager.HandleHeight(tc.height) + handleHeightActual := manager.HandleHeight(tc.height, data) require.Equal(t, tc.expectedResult, handleHeightActual) - actualHeights, err := manager.GetFlushAndResetPruningHeights() + actualHeights, err := manager.GetFlushAndResetPruningHeights(data) require.NoError(t, err) require.Equal(t, len(tc.expectedHeights), len(actualHeights)) require.Equal(t, tc.expectedHeights, actualHeights) @@ -280,18 +282,18 @@ func TestHandleHeight_FlushLoadFromDisk(t *testing.T) { t.Run(name, func(t *testing.T) { // Setup db := db.NewMemDB() - manager := pruning.NewManager(db, log.NewNopLogger()) + manager := pruning.NewManager() require.NotNil(t, manager) manager.SetSnapshotInterval(tc.snapshotInterval) manager.SetOptions(types.NewCustomPruningOptions(uint64(tc.keepRecent), uint64(10))) for _, snapshotHeight := range tc.movedSnapshotHeights { - manager.HandleHeightSnapshot(snapshotHeight) + manager.HandleHeightSnapshot(snapshotHeight, db) } // Test HandleHeight and flush - handleHeightActual := manager.HandleHeight(tc.previousHeight) + handleHeightActual := manager.HandleHeight(tc.previousHeight, db) require.Equal(t, tc.expectedHandleHeightResult, handleHeightActual) loadedPruneHeights, err := pruning.LoadPruningHeights(db) @@ -302,7 +304,7 @@ func TestHandleHeight_FlushLoadFromDisk(t *testing.T) { err = manager.LoadPruningHeights(db) require.NoError(t, err) - heights, err := manager.GetFlushAndResetPruningHeights() + heights, err := manager.GetFlushAndResetPruningHeights(db) require.NoError(t, err) require.Equal(t, len(tc.expectedLoadedHeights), len(heights)) require.ElementsMatch(t, tc.expectedLoadedHeights, heights) @@ -318,7 +320,7 @@ func TestHandleHeight_DbErr_Panic(t *testing.T) { dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) - manager := pruning.NewManager(dbMock, log.NewNopLogger()) + manager := pruning.NewManager() manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) require.NotNil(t, manager) @@ -328,7 +330,7 @@ func TestHandleHeight_DbErr_Panic(t *testing.T) { } }() - manager.HandleHeight(10) + manager.HandleHeight(10, dbMock) } func TestHandleHeightSnapshot_FlushLoadFromDisk(t *testing.T) { @@ -336,14 +338,14 @@ func TestHandleHeightSnapshot_FlushLoadFromDisk(t *testing.T) { // Setup db := db.NewMemDB() - manager := pruning.NewManager(db, log.NewNopLogger()) + manager := pruning.NewManager() require.NotNil(t, manager) 
manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) for snapshotHeight := int64(-1); snapshotHeight < 100; snapshotHeight++ { // Test flush - manager.HandleHeightSnapshot(snapshotHeight) + manager.HandleHeightSnapshot(snapshotHeight, db) // Post test if snapshotHeight > 0 { @@ -372,7 +374,7 @@ func TestHandleHeightSnapshot_DbErr_Panic(t *testing.T) { dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) - manager := pruning.NewManager(dbMock, log.NewNopLogger()) + manager := pruning.NewManager() manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) require.NotNil(t, manager) @@ -382,12 +384,12 @@ func TestHandleHeightSnapshot_DbErr_Panic(t *testing.T) { } }() - manager.HandleHeightSnapshot(10) + manager.HandleHeightSnapshot(10, dbMock) } func TestFlushLoad(t *testing.T) { db := db.NewMemDB() - manager := pruning.NewManager(db, log.NewNopLogger()) + manager := pruning.NewManager() require.NotNil(t, manager) curStrategy := types.NewCustomPruningOptions(100, 15) @@ -403,7 +405,7 @@ func TestFlushLoad(t *testing.T) { heightsToPruneMirror := make([]int64, 0) for curHeight := int64(0); curHeight < 1000; curHeight++ { - handleHeightActual := manager.HandleHeight(curHeight) + handleHeightActual := manager.HandleHeight(curHeight, db) curHeightStr := fmt.Sprintf("height: %d", curHeight) @@ -416,7 +418,7 @@ func TestFlushLoad(t *testing.T) { } if manager.ShouldPruneAtHeight(curHeight) && curHeight > int64(keepRecent) { - actualHeights, err := manager.GetFlushAndResetPruningHeights() + actualHeights, err := manager.GetFlushAndResetPruningHeights(db) require.NoError(t, err) require.Equal(t, len(heightsToPruneMirror), len(actualHeights)) require.Equal(t, heightsToPruneMirror, actualHeights) @@ -424,7 +426,7 @@ func TestFlushLoad(t *testing.T) { err = manager.LoadPruningHeights(db) require.NoError(t, err) - actualHeights, err = manager.GetFlushAndResetPruningHeights() + actualHeights, err = manager.GetFlushAndResetPruningHeights(db) require.NoError(t, err) require.Equal(t, len(heightsToPruneMirror), len(actualHeights)) require.Equal(t, heightsToPruneMirror, actualHeights) @@ -436,7 +438,7 @@ func TestFlushLoad(t *testing.T) { func TestLoadPruningHeights(t *testing.T) { var ( - manager = pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + manager = pruning.NewManager() err error ) require.NotNil(t, manager) @@ -506,7 +508,7 @@ func TestLoadPruningHeights(t *testing.T) { } func TestLoadPruningHeights_PruneNothing(t *testing.T) { - manager := pruning.NewManager(db.NewMemDB(), log.NewNopLogger()) + manager := pruning.NewManager() require.NotNil(t, manager) manager.SetOptions(types.NewPruningOptions(types.PruningNothing)) @@ -522,11 +524,11 @@ func TestGetFlushAndResetPruningHeights_DbErr_Panic(t *testing.T) { dbMock.EXPECT().SetSync(gomock.Any(), gomock.Any()).Return(errors.New(dbErr)).Times(1) - manager := pruning.NewManager(dbMock, log.NewNopLogger()) + manager := pruning.NewManager() manager.SetOptions(types.NewPruningOptions(types.PruningEverything)) require.NotNil(t, manager) - heights, err := manager.GetFlushAndResetPruningHeights() + heights, err := manager.GetFlushAndResetPruningHeights(dbMock) require.Error(t, err) require.Nil(t, heights) } diff --git a/runtime/app.go b/runtime/app.go index f9e03d41943b..f7ec8163ec23 100644 --- a/runtime/app.go +++ b/runtime/app.go @@ -75,7 +75,7 @@ func (a *App) RegisterModules(modules ...module.AppModule) error { } // Load finishes all initialization operations and loads the app. 
-func (a *App) Load(loadLatest bool) error { +func (a *App) Load() error { a.configurator = module.NewConfigurator(a.cdc, a.MsgServiceRouter(), a.GRPCQueryRouter()) a.ModuleManager.RegisterServices(a.configurator) @@ -100,12 +100,6 @@ func (a *App) Load(loadLatest bool) error { a.SetEndBlocker(a.EndBlocker) } - if loadLatest { - if err := a.LoadLatestVersion(); err != nil { - return err - } - } - return nil } diff --git a/runtime/builder.go b/runtime/builder.go index f73805c17cac..51d09b27578c 100644 --- a/runtime/builder.go +++ b/runtime/builder.go @@ -5,9 +5,9 @@ import ( "io" "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/baseapp" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/version" ) @@ -27,20 +27,20 @@ func (a *AppBuilder) DefaultGenesis() map[string]json.RawMessage { // Build builds an *App instance. func (a *AppBuilder) Build( logger log.Logger, - db dbm.DB, + db dbm.Connection, traceStore io.Writer, - baseAppOptions ...func(*baseapp.BaseApp), + baseAppOptions ...baseapp.AppOption, ) *App { for _, option := range a.app.baseAppOptions { - baseAppOptions = append(baseAppOptions, option) + baseAppOptions = append(baseAppOptions, baseapp.AppOptionFunc(option)) } + baseAppOptions = append(baseAppOptions, baseapp.SetSubstores(a.app.storeKeys...)) bApp := baseapp.NewBaseApp(a.app.config.AppName, logger, db, nil, baseAppOptions...) bApp.SetMsgServiceRouter(a.app.msgServiceRouter) bApp.SetCommitMultiStoreTracer(traceStore) bApp.SetVersion(version.Version) bApp.SetInterfaceRegistry(a.app.interfaceRegistry) - bApp.MountStores(a.app.storeKeys...) a.app.BaseApp = bApp return a.app diff --git a/runtime/module.go b/runtime/module.go index 07ff9a430d73..865a2af9c120 100644 --- a/runtime/module.go +++ b/runtime/module.go @@ -23,6 +23,10 @@ type BaseAppOption func(*baseapp.BaseApp) // IsManyPerContainerType indicates that this is a depinject.ManyPerContainerType. func (b BaseAppOption) IsManyPerContainerType() {} +// // Implement base +// func (opt BaseAppOption) Apply(app *baseapp.BaseApp) { opt(app) } +// func (opt BaseAppOption) Order() baseapp.OptionOrder { return baseapp.OptionOrderDefault } + // appWrapper is used to pass around an instance of *App internally between // runtime dependency inject providers that is partially constructed (no // baseapp yet). 
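Editor's note: with the builder change above, substore keys reach `BaseApp` as an `AppOption` instead of via `MountStores` plus `LoadLatestVersion`, and the constructor now takes a `db.Connection`. A minimal sketch of building a `BaseApp` directly under the new API; the mock app later in this diff does the same thing, and the nil tx decoder is only acceptable here because no transactions are executed:

```go
package main

import (
	"github.com/tendermint/tendermint/libs/log"

	"github.com/cosmos/cosmos-sdk/baseapp"
	"github.com/cosmos/cosmos-sdk/db/memdb"
	sdk "github.com/cosmos/cosmos-sdk/types"
)

func main() {
	key := sdk.NewKVStoreKey("main")

	app := baseapp.NewBaseApp(
		"demo",
		log.NewNopLogger(),
		memdb.NewDB(), // a db.Connection instead of a tm-db DB
		nil,           // tx decoder; fine for a sketch that never runs txs
		baseapp.SetSubstores(key), // substores declared as an AppOption
	)
	_ = app
}
```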
diff --git a/server/config/config.go b/server/config/config.go index bd0ba7ae58ce..030963fc3a58 100644 --- a/server/config/config.go +++ b/server/config/config.go @@ -244,7 +244,7 @@ func DefaultConfig() *Config { MinRetainBlocks: 0, IndexEvents: make([]string, 0), IAVLCacheSize: 781250, // 50 MB - AppDBBackend: "", + AppDBBackend: "badgerdb", }, Telemetry: telemetry.Config{ Enabled: false, diff --git a/server/constructors_test.go b/server/constructors_test.go index 646311b45720..154c30e37288 100644 --- a/server/constructors_test.go +++ b/server/constructors_test.go @@ -6,12 +6,12 @@ import ( "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" + dbm "github.com/cosmos/cosmos-sdk/db" ) func Test_openDB(t *testing.T) { t.Parallel() - _, err := openDB(t.TempDir(), dbm.GoLevelDBBackend) + _, err := openDB(t.TempDir(), dbm.BadgerDBBackend) require.NoError(t, err) } diff --git a/server/mock/app.go b/server/mock/app.go index 57dd93373de2..d7e36aef9b1a 100644 --- a/server/mock/app.go +++ b/server/mock/app.go @@ -6,14 +6,13 @@ import ( "fmt" "path/filepath" - "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" bam "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/db/badgerdb" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -22,7 +21,7 @@ import ( // similar to a real app. Make sure rootDir is empty before running the test, // in order to guarantee consistent results func NewApp(rootDir string, logger log.Logger) (abci.Application, error) { - db, err := db.NewGoLevelDB("mock", filepath.Join(rootDir, "data")) + db, err := badgerdb.NewDB(filepath.Join(rootDir, "mock")) if err != nil { return nil, err } @@ -31,21 +30,14 @@ func NewApp(rootDir string, logger log.Logger) (abci.Application, error) { capKeyMainStore := sdk.NewKVStoreKey("main") // Create BaseApp. - baseApp := bam.NewBaseApp("kvstore", logger, db, decodeTx) - - // Set mounts for BaseApp's MultiStore. - baseApp.MountStores(capKeyMainStore) + opt := bam.SetSubstores(capKeyMainStore) + baseApp := bam.NewBaseApp("kvstore", logger, db, decodeTx, opt) baseApp.SetInitChainer(InitChainer(capKeyMainStore)) // Set a Route. baseApp.Router().AddRoute(sdk.NewRoute("kvstore", KVStoreHandler(capKeyMainStore))) - // Load latest version. 
- if err := baseApp.LoadLatestVersion(); err != nil { - return nil, err - } - return baseApp, nil } diff --git a/server/mock/store.go b/server/mock/store.go index ebc4caaa421f..13f3c66ee98a 100644 --- a/server/mock/store.go +++ b/server/mock/store.go @@ -4,37 +4,36 @@ import ( "io" protoio "github.com/gogo/protobuf/io" - dbm "github.com/tendermint/tm-db" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - storetypes "github.com/cosmos/cosmos-sdk/store/types" + storetypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" sdk "github.com/cosmos/cosmos-sdk/types" ) -var _ sdk.MultiStore = multiStore{} +var _ sdk.CommitMultiStore = multiStore{} type multiStore struct { kv map[storetypes.StoreKey]kvStore } -func (ms multiStore) CacheMultiStore() sdk.CacheMultiStore { - panic("not implemented") -} +var _ sdk.KVStore = kvStore{} -func (ms multiStore) CacheMultiStoreWithVersion(_ int64) (sdk.CacheMultiStore, error) { - panic("not implemented") +type kvStore struct { + store map[string][]byte } -func (ms multiStore) CacheWrap() storetypes.CacheWrap { - panic("not implemented") -} +type MultiStoreConfig = []storetypes.StoreKey -func (ms multiStore) CacheWrapWithTrace(_ io.Writer, _ sdk.TraceContext) storetypes.CacheWrap { - panic("not implemented") +func NewCommitMultiStore(c MultiStoreConfig) sdk.CommitMultiStore { + stores := make(map[storetypes.StoreKey]kvStore) + for _, skey := range c { + stores[skey] = kvStore{map[string][]byte{}} + } + return multiStore{kv: stores} } -func (ms multiStore) CacheWrapWithListeners(_ storetypes.StoreKey, _ []storetypes.WriteListener) storetypes.CacheWrap { +func (ms multiStore) CacheWrap() sdk.CacheMultiStore { panic("not implemented") } @@ -42,11 +41,11 @@ func (ms multiStore) TracingEnabled() bool { panic("not implemented") } -func (ms multiStore) SetTracingContext(tc sdk.TraceContext) sdk.MultiStore { +func (ms multiStore) SetTracingContext(tc sdk.TraceContext) { panic("not implemented") } -func (ms multiStore) SetTracer(w io.Writer) sdk.MultiStore { +func (ms multiStore) SetTracer(w io.Writer) { panic("not implemented") } @@ -74,43 +73,24 @@ func (ms multiStore) GetPruning() pruningtypes.PruningOptions { panic("not implemented") } -func (ms multiStore) GetCommitKVStore(key storetypes.StoreKey) storetypes.CommitKVStore { - panic("not implemented") -} - -func (ms multiStore) GetCommitStore(key storetypes.StoreKey) storetypes.CommitStore { - panic("not implemented") -} - -func (ms multiStore) MountStoreWithDB(key storetypes.StoreKey, typ storetypes.StoreType, db dbm.DB) { - ms.kv[key] = kvStore{store: make(map[string][]byte)} -} - -func (ms multiStore) LoadLatestVersion() error { - return nil +func (ms multiStore) GetKVStore(key storetypes.StoreKey) sdk.KVStore { + return ms.kv[key] } -func (ms multiStore) LoadLatestVersionAndUpgrade(upgrades *storetypes.StoreUpgrades) error { - return nil +func (ms multiStore) HasKVStore(key storetypes.StoreKey) bool { + _, has := ms.kv[key] + return has } -func (ms multiStore) LoadVersionAndUpgrade(ver int64, upgrades *storetypes.StoreUpgrades) error { +func (ms multiStore) SetInitialVersion(version uint64) error { panic("not implemented") } -func (ms multiStore) LoadVersion(ver int64) error { +func (ms multiStore) GetVersion(int64) (storetypes.MultiStore, error) { panic("not implemented") } -func (ms multiStore) GetKVStore(key storetypes.StoreKey) sdk.KVStore { - return ms.kv[key] -} - -func (ms multiStore) GetStore(key storetypes.StoreKey) sdk.Store { - 
panic("not implemented") -} - -func (ms multiStore) GetStoreType() storetypes.StoreType { +func (ms multiStore) GetAllVersions() []int { panic("not implemented") } @@ -126,11 +106,7 @@ func (ms multiStore) SetInterBlockCache(_ sdk.MultiStorePersistentCache) { panic("not implemented") } -func (ms multiStore) SetIAVLCacheSize(size int) { - panic("not implemented") -} - -func (ms multiStore) SetInitialVersion(version int64) error { +func (ms multiStore) Close() error { panic("not implemented") } @@ -144,12 +120,6 @@ func (ms multiStore) Restore( panic("not implemented") } -var _ sdk.KVStore = kvStore{} - -type kvStore struct { - store map[string][]byte -} - func (kv kvStore) CacheWrap() storetypes.CacheWrap { panic("not implemented") } @@ -188,14 +158,6 @@ func (kv kvStore) Delete(key []byte) { delete(kv.store, string(key)) } -func (kv kvStore) Prefix(prefix []byte) sdk.KVStore { - panic("not implemented") -} - -func (kv kvStore) Gas(meter sdk.GasMeter, config sdk.GasConfig) sdk.KVStore { - panic("not implmeneted") -} - func (kv kvStore) Iterator(start, end []byte) sdk.Iterator { panic("not implemented") } @@ -203,15 +165,3 @@ func (kv kvStore) Iterator(start, end []byte) sdk.Iterator { func (kv kvStore) ReverseIterator(start, end []byte) sdk.Iterator { panic("not implemented") } - -func (kv kvStore) SubspaceIterator(prefix []byte) sdk.Iterator { - panic("not implemented") -} - -func (kv kvStore) ReverseSubspaceIterator(prefix []byte) sdk.Iterator { - panic("not implemented") -} - -func NewCommitMultiStore() sdk.CommitMultiStore { - return multiStore{kv: make(map[storetypes.StoreKey]kvStore)} -} diff --git a/server/mock/store_test.go b/server/mock/store_test.go index fc5cd12e097e..f1cf5d2fba29 100644 --- a/server/mock/store_test.go +++ b/server/mock/store_test.go @@ -4,20 +4,13 @@ import ( "testing" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" ) func TestStore(t *testing.T) { - db := dbm.NewMemDB() - cms := NewCommitMultiStore() - key := sdk.NewKVStoreKey("test") - cms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db) - err := cms.LoadLatestVersion() - require.Nil(t, err) + cms := NewCommitMultiStore(MultiStoreConfig{key}) store := cms.GetKVStore(key) require.NotNil(t, store) diff --git a/server/rollback.go b/server/rollback.go index e99088dd73cc..a0e772b2cf2f 100644 --- a/server/rollback.go +++ b/server/rollback.go @@ -4,7 +4,6 @@ import ( "fmt" "github.com/cosmos/cosmos-sdk/client/flags" - "github.com/cosmos/cosmos-sdk/store/rootmulti" "github.com/spf13/cobra" tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" ) @@ -35,9 +34,13 @@ application. 
if err != nil { return fmt.Errorf("failed to rollback tendermint state: %w", err) } - // rollback the multistore - cms := rootmulti.NewStore(db, ctx.Logger) - cms.RollbackToVersion(height) + // rollback the database + if err = db.RevertTo(uint64(height)); err != nil { + panic(err) + } + if err = db.Close(); err != nil { + panic(err) + } fmt.Printf("Rolled back state to height %d and hash %X", height, hash) return nil diff --git a/server/types/app.go b/server/types/app.go index 76493b060351..d3ba0cbedbfd 100644 --- a/server/types/app.go +++ b/server/types/app.go @@ -11,9 +11,9 @@ import ( "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtypes "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/client" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" ) @@ -56,7 +56,7 @@ type ( // AppCreator is a function that allows us to lazily initialize an // application using various configurations. - AppCreator func(log.Logger, dbm.DB, io.Writer, AppOptions) Application + AppCreator func(log.Logger, dbm.Connection, io.Writer, AppOptions) Application // ModuleInitFlags takes a start command and adds modules specific init flags. ModuleInitFlags func(startCmd *cobra.Command) @@ -76,5 +76,5 @@ type ( // AppExporter is a function that dumps all app state to // JSON-serializable structure and returns the current validator set. - AppExporter func(log.Logger, dbm.DB, io.Writer, int64, bool, []string, AppOptions) (ExportedApp, error) + AppExporter func(log.Logger, dbm.Connection, io.Writer, int64, bool, []string, AppOptions) (ExportedApp, error) ) diff --git a/server/util.go b/server/util.go index ed0d8db04f60..f5f6a503c576 100644 --- a/server/util.go +++ b/server/util.go @@ -23,9 +23,9 @@ import ( tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" tmcfg "github.com/tendermint/tendermint/config" tmlog "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/client/flags" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/server/config" "github.com/cosmos/cosmos-sdk/server/types" sdk "github.com/cosmos/cosmos-sdk/types" @@ -372,7 +372,7 @@ func GetAppDBBackend(opts types.AppOptions) dbm.BackendType { if len(rv) != 0 { return dbm.BackendType(rv) } - return dbm.GoLevelDBBackend + return dbm.BadgerDBBackend } func skipInterface(iface net.Interface) bool { @@ -399,7 +399,7 @@ func addrToIP(addr net.Addr) net.IP { return ip } -func openDB(rootDir string, backendType dbm.BackendType) (dbm.DB, error) { +func openDB(rootDir string, backendType dbm.BackendType) (dbm.Connection, error) { dataDir := filepath.Join(rootDir, "data") return dbm.NewDB("application", backendType, dataDir) } diff --git a/simapp/app.go b/simapp/app.go index b0689eaf7d1d..3430bbaf9f73 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -4,6 +4,7 @@ package simapp import ( _ "embed" + "errors" "io" "net/http" "os" @@ -14,20 +15,22 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - dbm "github.com/tendermint/tm-db" "cosmossdk.io/depinject" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + dbm "github.com/cosmos/cosmos-sdk/db" 
"github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" servertypes "github.com/cosmos/cosmos-sdk/server/types" simappparams "github.com/cosmos/cosmos-sdk/simapp/params" "github.com/cosmos/cosmos-sdk/store/streaming" - storetypes "github.com/cosmos/cosmos-sdk/store/types" + storetypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + // "github.com/cosmos/cosmos-sdk/testutil/mock" "github.com/cosmos/cosmos-sdk/testutil/testdata_pulsar" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" @@ -186,8 +189,12 @@ func init() { // NewSimApp returns a reference to an initialized SimApp. func NewSimApp( - logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, encodingConfig simappparams.EncodingConfig, - appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp), + logger log.Logger, + db dbm.Connection, + traceStore io.Writer, + encodingConfig simappparams.EncodingConfig, + appOpts servertypes.AppOptions, + baseAppOptions ...baseapp.AppOption, ) *SimApp { var ( app = &SimApp{} @@ -236,6 +243,12 @@ func NewSimApp( panic(err) } + // // set the governance module account as the authority for conducting upgrades + // upgradeKeeper := upgradekeeper.NewKeeper(skipUpgradeHeights, app.keys[upgradetypes.StoreKey], app.appCodec, homePath, nil, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + // if upgradeOpt := GetUpgradeStoreOption(upgradeKeeper); upgradeOpt != nil { + // baseAppOptions = append(baseAppOptions, upgradeOpt) + // } + app.App = appBuilder.Build(logger, db, traceStore, baseAppOptions...) // configure state listening capabilities using AppOptions @@ -244,6 +257,10 @@ func NewSimApp( tmos.Exit(err.Error()) } + // // set the governance module account as the authority for conducting upgrades + // upgradeKeeper.SetVersionSetter(app.BaseApp) + // app.UpgradeKeeper = upgradeKeeper + /**** Module Options ****/ // Sets the version setter for the upgrade module @@ -289,16 +306,12 @@ func NewSimApp( app.sm.RegisterStoreDecoders() - // initialize stores - app.MountKVStores(app.keys) - // initialize BaseApp app.SetInitChainer(app.InitChainer) - if err := app.Load(loadLatest); err != nil { + if err := app.Load(); err != nil { panic(err) } - return app } @@ -313,7 +326,7 @@ func (app *SimApp) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci. // LoadHeight loads a particular height func (app *SimApp) LoadHeight(height int64) error { - return app.LoadVersion(height) + return errors.New("cannot load arbitrary height") } // LegacyAmino returns SimApp's amino codec. 
diff --git a/simapp/app_test.go b/simapp/app_test.go index 49076b2a1bf9..33f12341bcb9 100644 --- a/simapp/app_test.go +++ b/simapp/app_test.go @@ -9,9 +9,9 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/tests/mocks" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" sdk "github.com/cosmos/cosmos-sdk/types" @@ -30,6 +30,7 @@ import ( "github.com/cosmos/cosmos-sdk/x/gov" group "github.com/cosmos/cosmos-sdk/x/group/module" "github.com/cosmos/cosmos-sdk/x/mint" + // nft "github.com/cosmos/cosmos-sdk/x/nft/module" "github.com/cosmos/cosmos-sdk/x/params" "github.com/cosmos/cosmos-sdk/x/slashing" "github.com/cosmos/cosmos-sdk/x/staking" @@ -38,8 +39,8 @@ import ( func TestSimAppExportAndBlockedAddrs(t *testing.T) { encCfg := MakeTestEncodingConfig() - db := dbm.NewMemDB() logger, _ := log.NewDefaultLogger("plain", "info", false) + db := memdb.NewDB() app := NewSimappWithCustomOptions(t, false, SetupOptions{ Logger: logger, DB: db, @@ -56,10 +57,12 @@ func TestSimAppExportAndBlockedAddrs(t *testing.T) { } app.Commit() + require.NoError(t, app.CloseStore()) logger2, _ := log.NewDefaultLogger("plain", "info", false) // Making a new app object with the db, so that initchain hasn't been called - app2 := NewSimApp(logger2, db, nil, true, encCfg, simtestutil.NewAppOptionsWithFlagHome(DefaultNodeHome)) + app2 := NewSimApp(logger2, db, nil, encCfg, simtestutil.NewAppOptionsWithFlagHome(DefaultNodeHome)) + require.NoError(t, app2.Init()) _, err := app2.ExportAppStateAndValidators(false, []string{}) require.NoError(t, err, "ExportAppStateAndValidators should not have an error") } @@ -70,13 +73,12 @@ func TestGetMaccPerms(t *testing.T) { } func TestRunMigrations(t *testing.T) { - db := dbm.NewMemDB() encCfg := MakeTestEncodingConfig() logger, _ := log.NewDefaultLogger("plain", "info", false) - app := NewSimApp(logger, db, nil, true, encCfg, simtestutil.NewAppOptionsWithFlagHome(DefaultNodeHome)) + app := NewSimApp(logger, memdb.NewDB(), nil, encCfg, simtestutil.NewAppOptionsWithFlagHome(DefaultNodeHome)) // Create a new baseapp and configurator for the purpose of this test. - bApp := baseapp.NewBaseApp(app.Name(), logger, db, encCfg.TxConfig.TxDecoder()) + bApp := baseapp.NewBaseApp(app.Name(), logger, memdb.NewDB(), encCfg.TxConfig.TxDecoder()) bApp.SetCommitMultiStoreTracer(nil) bApp.SetInterfaceRegistry(encCfg.InterfaceRegistry) app.BaseApp = bApp @@ -94,6 +96,7 @@ func TestRunMigrations(t *testing.T) { module.RegisterServices(configurator) } + require.NoError(t, app.Init()) // Initialize the chain app.InitChain(abci.RequestInitChain{}) @@ -203,10 +206,11 @@ func TestRunMigrations(t *testing.T) { } func TestInitGenesisOnMigration(t *testing.T) { - db := dbm.NewMemDB() + db := memdb.NewDB() encCfg := MakeTestEncodingConfig() logger, _ := log.NewDefaultLogger("plain", "info", false) - app := NewSimApp(logger, db, nil, true, encCfg, simtestutil.NewAppOptionsWithFlagHome(DefaultNodeHome)) + app := NewSimApp(logger, db, nil, encCfg, simtestutil.NewAppOptionsWithFlagHome(DefaultNodeHome)) + require.NoError(t, app.Init()) ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()}) // Create a mock module. 
This module will serve as the new module we're @@ -241,6 +245,7 @@ func TestInitGenesisOnMigration(t *testing.T) { "crisis": crisis.AppModule{}.ConsensusVersion(), "genutil": genutil.AppModule{}.ConsensusVersion(), "capability": capability.AppModule{}.ConsensusVersion(), + // "nft": nft.AppModule{}.ConsensusVersion(), }, ) require.NoError(t, err) @@ -248,7 +253,7 @@ func TestInitGenesisOnMigration(t *testing.T) { func TestUpgradeStateOnGenesis(t *testing.T) { encCfg := MakeTestEncodingConfig() - db := dbm.NewMemDB() + db := memdb.NewDB() logger, _ := log.NewDefaultLogger("plain", "info", false) app := NewSimappWithCustomOptions(t, false, SetupOptions{ Logger: logger, diff --git a/simapp/config.go b/simapp/config.go index 48cc053f2533..498c53c4d2b6 100644 --- a/simapp/config.go +++ b/simapp/config.go @@ -22,7 +22,7 @@ var ( FlagCommitValue bool FlagOnOperationValue bool // TODO: Remove in favor of binary search for invariant violation FlagAllInvariantsValue bool - FlagDBBackendValue string + FlagTMDBBackendValue string FlagEnabledValue bool FlagVerboseValue bool @@ -47,7 +47,7 @@ func GetSimulatorFlags() { flag.BoolVar(&FlagCommitValue, "Commit", false, "have the simulation commit") flag.BoolVar(&FlagOnOperationValue, "SimulateEveryOperation", false, "run slow invariants every operation") flag.BoolVar(&FlagAllInvariantsValue, "PrintAllInvariants", false, "print all invariants if a broken invariant is found") - flag.StringVar(&FlagDBBackendValue, "DBBackend", "goleveldb", "custom db backend type") + flag.StringVar(&FlagTMDBBackendValue, "DBBackend", "badgerdb", "custom db backend type") // simulation flags flag.BoolVar(&FlagEnabledValue, "Enabled", false, "enable the simulation") @@ -73,6 +73,6 @@ func NewConfigFromFlags() simulation.Config { Commit: FlagCommitValue, OnOperation: FlagOnOperationValue, AllInvariants: FlagAllInvariantsValue, - DBBackend: FlagDBBackendValue, + DBBackend: FlagTMDBBackendValue, } } diff --git a/simapp/export.go b/simapp/export.go index 990a426c4dc0..e1706b156572 100644 --- a/simapp/export.go +++ b/simapp/export.go @@ -19,14 +19,29 @@ import ( func (app *SimApp) ExportAppStateAndValidators( forZeroHeight bool, jailAllowedAddrs []string, ) (servertypes.ExportedApp, error) { + return app.ExportAppStateAndValidatorsAt(forZeroHeight, jailAllowedAddrs, 0) +} + +// ExportAppStateAndValidatorsAt exports the application state at a given block +// height for a genesis file. +// Passing a height < 1 will export for the latest block height. +func (app *SimApp) ExportAppStateAndValidatorsAt( + forZeroHeight bool, jailAllowedAddrs []string, height int64, +) (servertypes.ExportedApp, error) { + if height < 1 { + height = app.LastBlockHeight() + } // as if they could withdraw from the start of the next block - ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()}) + ctx, err := app.NewContextAt(true, tmproto.Header{Height: height}, height) + if err != nil { + return servertypes.ExportedApp{}, err + } // We export at last height + 1, because that's the height at which // Tendermint will start InitChain. 
- height := app.LastBlockHeight() + 1 + exportHeight := height + 1 if forZeroHeight { - height = 0 + exportHeight = 0 app.prepForZeroHeightGenesis(ctx, jailAllowedAddrs) } @@ -40,7 +55,7 @@ func (app *SimApp) ExportAppStateAndValidators( return servertypes.ExportedApp{ AppState: appState, Validators: validators, - Height: height, + Height: exportHeight, ConsensusParams: app.BaseApp.GetConsensusParams(ctx), }, err } diff --git a/simapp/integration/server/export_test.go b/simapp/integration/server/export_test.go index be92d72d4175..133a1b167187 100644 --- a/simapp/integration/server/export_test.go +++ b/simapp/integration/server/export_test.go @@ -17,10 +17,10 @@ import ( "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtypes "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/flags" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/server" "github.com/cosmos/cosmos-sdk/server/types" "github.com/cosmos/cosmos-sdk/simapp" @@ -106,7 +106,7 @@ func TestExportCmd_Height(t *testing.T) { cmd.SetOut(output) args := append(tc.flags, fmt.Sprintf("--%s=%s", flags.FlagHome, tempDir)) cmd.SetArgs(args) - require.NoError(t, cmd.ExecuteContext(ctx)) + require.NoError(t, cmd.ExecuteContext(ctx), tc.name) var exportedGenDoc tmtypes.GenesisDoc err := tmjson.Unmarshal(output.Bytes(), &exportedGenDoc) @@ -129,7 +129,7 @@ func setupApp(t *testing.T, tempDir string) (*simapp.SimApp, context.Context, *t logger, _ := log.NewDefaultLogger("plain", "info", false) db := dbm.NewMemDB() encCfg := simapp.MakeTestEncodingConfig() - app := simapp.NewSimApp(logger, db, nil, true, encCfg, simtestutil.NewAppOptionsWithFlagHome(tempDir)) + app := simapp.NewSimApp(logger, db, nil, encCfg, simtestutil.NewAppOptionsWithFlagHome(tempDir)) genesisState := simapp.GenesisStateWithSingleValidator(t, app) stateBytes, err := tmjson.MarshalIndent(genesisState, "", " ") @@ -155,21 +155,13 @@ func setupApp(t *testing.T, tempDir string) (*simapp.SimApp, context.Context, *t app.Commit() cmd := server.ExportCmd( - func(_ log.Logger, _ dbm.DB, _ io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string, appOptions types.AppOptions) (types.ExportedApp, error) { + func(_ log.Logger, _ dbm.Connection, _ io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string, appOptions types.AppOptions) (types.ExportedApp, error) { + require.NoError(t, app.CloseStore()) encCfg := simapp.MakeTestEncodingConfig() + simApp := simapp.NewSimApp(logger, db, nil, encCfg, appOptions) + require.NoError(t, simApp.Init()) - var simApp *simapp.SimApp - if height != -1 { - simApp = simapp.NewSimApp(logger, db, nil, false, encCfg, appOptions) - - if err := simApp.LoadHeight(height); err != nil { - return types.ExportedApp{}, err - } - } else { - simApp = simapp.NewSimApp(logger, db, nil, true, encCfg, appOptions) - } - - return simApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs) + return simApp.ExportAppStateAndValidatorsAt(forZeroHeight, jailAllowedAddrs, height) }, tempDir) ctx := context.Background() diff --git a/simapp/sim_bench_test.go b/simapp/sim_bench_test.go index 115dd62b6073..e76331fafb47 100644 --- a/simapp/sim_bench_test.go +++ b/simapp/sim_bench_test.go @@ -37,7 +37,7 @@ func BenchmarkFullAppSimulation(b *testing.B) { appOptions[flags.FlagHome] = DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = FlagPeriodValue - app := 
NewSimApp(logger, db, nil, true, MakeTestEncodingConfig(), appOptions, interBlockCacheOpt()) + app := NewSimApp(logger, db, nil, MakeTestEncodingConfig(), appOptions, interBlockCacheOpt()) // run randomized simulation _, simParams, simErr := simulation.SimulateFromSeed( @@ -88,7 +88,7 @@ func BenchmarkInvariants(b *testing.B) { appOptions[flags.FlagHome] = DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = FlagPeriodValue - app := NewSimApp(logger, db, nil, true, MakeTestEncodingConfig(), appOptions, interBlockCacheOpt()) + app := NewSimApp(logger, db, nil, MakeTestEncodingConfig(), appOptions, interBlockCacheOpt()) // run randomized simulation _, simParams, simErr := simulation.SimulateFromSeed( diff --git a/simapp/sim_test.go b/simapp/sim_test.go index fc7bc7e8c0ba..1d02fd7c1092 100644 --- a/simapp/sim_test.go +++ b/simapp/sim_test.go @@ -13,14 +13,13 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/server" storetypes "github.com/cosmos/cosmos-sdk/store/types" "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/store" + "github.com/cosmos/cosmos-sdk/db/memdb" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" sdk "github.com/cosmos/cosmos-sdk/types" simtypes "github.com/cosmos/cosmos-sdk/types/simulation" @@ -50,15 +49,16 @@ type StoreKeysPrefixes struct { } // fauxMerkleModeOpt returns a BaseApp option to use a dbStoreAdapter instead of -// an IAVLStore for faster simulation speed. -func fauxMerkleModeOpt(bapp *baseapp.BaseApp) { +// a Merkle tree store for faster simulation speed. +var fauxMerkleModeOpt = baseapp.AppOptionFunc(func(bapp *baseapp.BaseApp) { bapp.SetFauxMerkleMode() -} +}) // interBlockCacheOpt returns a BaseApp option function that sets the persistent // inter-block write-through cache. 
-func interBlockCacheOpt() func(*baseapp.BaseApp) { - return baseapp.SetInterBlockCache(store.NewCommitKVStoreCacheManager()) +// TODO: implement this cache as enhancement to v2 multistore +func interBlockCacheOpt() baseapp.AppOptionFunc { + return func(*baseapp.BaseApp) {} } func TestFullAppSimulation(t *testing.T) { @@ -77,7 +77,7 @@ func TestFullAppSimulation(t *testing.T) { appOptions[flags.FlagHome] = DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = FlagPeriodValue - app := NewSimApp(logger, db, nil, true, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) + app := NewSimApp(logger, db, nil, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) require.Equal(t, "SimApp", app.Name()) // run randomized simulation @@ -119,7 +119,7 @@ func TestAppImportExport(t *testing.T) { appOptions[flags.FlagHome] = DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = FlagPeriodValue - app := NewSimApp(logger, db, nil, true, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) + app := NewSimApp(logger, db, nil, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) require.Equal(t, "SimApp", app.Name()) // Run randomized simulation @@ -159,7 +159,7 @@ func TestAppImportExport(t *testing.T) { require.NoError(t, os.RemoveAll(newDir)) }() - newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) + newApp := NewSimApp(log.NewNopLogger(), newDB, nil, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) require.Equal(t, "SimApp", newApp.Name()) var genesisState GenesisState @@ -232,7 +232,7 @@ func TestAppSimulationAfterImport(t *testing.T) { appOptions[flags.FlagHome] = DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = FlagPeriodValue - app := NewSimApp(logger, db, nil, true, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) + app := NewSimApp(logger, db, nil, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) require.Equal(t, "SimApp", app.Name()) // Run randomized simulation @@ -277,7 +277,7 @@ func TestAppSimulationAfterImport(t *testing.T) { require.NoError(t, os.RemoveAll(newDir)) }() - newApp := NewSimApp(log.NewNopLogger(), newDB, nil, true, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) + newApp := NewSimApp(log.NewNopLogger(), newDB, nil, MakeTestEncodingConfig(), appOptions, fauxMerkleModeOpt) require.Equal(t, "SimApp", newApp.Name()) newApp.InitChain(abci.RequestInitChain{ @@ -331,8 +331,8 @@ func TestAppStateDeterminism(t *testing.T) { logger = log.NewNopLogger() } - db := dbm.NewMemDB() - app := NewSimApp(logger, db, nil, true, MakeTestEncodingConfig(), appOptions, interBlockCacheOpt()) + db := memdb.NewDB() + app := NewSimApp(logger, db, nil, MakeTestEncodingConfig(), appOptions, interBlockCacheOpt()) fmt.Printf( "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n", diff --git a/simapp/simd/cmd/root.go b/simapp/simd/cmd/root.go index b1747abdceaa..a316f2b1a172 100644 --- a/simapp/simd/cmd/root.go +++ b/simapp/simd/cmd/root.go @@ -12,7 +12,6 @@ import ( tmcfg "github.com/tendermint/tendermint/config" tmcli "github.com/tendermint/tendermint/libs/cli" "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/client" @@ -21,6 +20,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/client/keys" "github.com/cosmos/cosmos-sdk/client/rpc" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/server" serverconfig 
"github.com/cosmos/cosmos-sdk/server/config" servertypes "github.com/cosmos/cosmos-sdk/server/types" @@ -245,7 +245,12 @@ type appCreator struct { } // newApp is an appCreator -func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, appOpts servertypes.AppOptions) servertypes.Application { +func (a appCreator) newApp( + logger log.Logger, + db dbm.Connection, + traceStore io.Writer, + appOpts servertypes.AppOptions, +) servertypes.Application { var cache sdk.MultiStorePersistentCache if cast.ToBool(appOpts.Get(server.FlagInterBlockCache)) { @@ -263,7 +268,7 @@ func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, a } snapshotDir := filepath.Join(cast.ToString(appOpts.Get(flags.FlagHome)), "data", "snapshots") - snapshotDB, err := dbm.NewDB("metadata", server.GetAppDBBackend(appOpts), snapshotDir) + snapshotDB, err := dbm.NewDB("metadata", dbm.BadgerDBBackend, snapshotDir) if err != nil { panic(err) } @@ -278,7 +283,7 @@ func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, a ) return simapp.NewSimApp( - logger, db, traceStore, true, + logger, db, traceStore, a.encCfg, appOpts, baseapp.SetPruning(pruningOpts), @@ -296,7 +301,7 @@ func (a appCreator) newApp(logger log.Logger, db dbm.DB, traceStore io.Writer, a // appExport creates a new simapp (optionally at a given height) // and exports state. func (a appCreator) appExport( - logger log.Logger, db dbm.DB, traceStore io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string, appOpts servertypes.AppOptions, + logger log.Logger, db dbm.Connection, traceStore io.Writer, height int64, forZeroHeight bool, jailAllowedAddrs []string, appOpts servertypes.AppOptions, ) (servertypes.ExportedApp, error) { var simApp *simapp.SimApp @@ -316,15 +321,9 @@ func (a appCreator) appExport( viperAppOpts.Set(server.FlagInvCheckPeriod, 1) appOpts = viperAppOpts + simApp = simapp.NewSimApp(logger, db, traceStore, a.encCfg, appOpts) if height != -1 { - simApp = simapp.NewSimApp(logger, db, traceStore, false, a.encCfg, appOpts) - - if err := simApp.LoadHeight(height); err != nil { - return servertypes.ExportedApp{}, err - } - } else { - simApp = simapp.NewSimApp(logger, db, traceStore, true, a.encCfg, appOpts) + return simApp.ExportAppStateAndValidatorsAt(forZeroHeight, jailAllowedAddrs, height) } - return simApp.ExportAppStateAndValidators(forZeroHeight, jailAllowedAddrs) } diff --git a/simapp/test_helpers.go b/simapp/test_helpers.go index 5ae000294933..47a023ae9443 100644 --- a/simapp/test_helpers.go +++ b/simapp/test_helpers.go @@ -13,7 +13,6 @@ import ( "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtypes "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" "cosmossdk.io/depinject" "cosmossdk.io/math" @@ -22,6 +21,7 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/db/memdb" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/server" "github.com/cosmos/cosmos-sdk/server/types" @@ -41,20 +41,19 @@ import ( // SetupOptions defines arguments that are passed into `Simapp` constructor. 
type SetupOptions struct { Logger log.Logger - DB *dbm.MemDB + DB *memdb.MemDB EncConfig simappparams.EncodingConfig AppOpts types.AppOptions } func setup(withGenesis bool, invCheckPeriod uint) (*SimApp, GenesisState) { - db := dbm.NewMemDB() encCdc := MakeTestEncodingConfig() appOptions := make(simtestutil.AppOptionsMap, 0) appOptions[flags.FlagHome] = DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = invCheckPeriod - app := NewSimApp(log.NewNopLogger(), db, nil, true, encCdc, appOptions) + app := NewSimApp(log.NewNopLogger(), memdb.NewDB(), nil, encCdc, appOptions) if withGenesis { return app, NewDefaultGenesisState(encCdc.Codec) } @@ -80,7 +79,9 @@ func NewSimappWithCustomOptions(t *testing.T, isCheckTx bool, options SetupOptio Coins: sdk.NewCoins(sdk.NewCoin(sdk.DefaultBondDenom, sdk.NewInt(100000000000000))), } - app := NewSimApp(options.Logger, options.DB, nil, true, options.EncConfig, options.AppOpts) + app := NewSimApp(options.Logger, options.DB, nil, options.EncConfig, options.AppOpts) + app.SetParamStore(mock.NewParamStore(memdb.NewDB())) + require.NoError(t, app.Init()) genesisState := NewDefaultGenesisState(app.appCodec) genesisState, err = simtestutil.GenesisStateWithValSet(app.AppCodec(), genesisState, valSet, []authtypes.GenesisAccount{acc}, balance) require.NoError(t, err) @@ -367,13 +368,15 @@ func NewTestNetworkFixture() network.TestFixture { InterfaceRegistry: encodingCfg.InterfaceRegistry, } appCtr := func(val testutil.Validator) servertypes.Application { - return NewSimApp( - val.GetCtx().Logger, dbm.NewMemDB(), nil, true, + app := NewSimApp( + val.GetCtx().Logger, memdb.NewDB(), nil, encodingCfg, simtestutil.NewAppOptionsWithFlagHome(val.GetCtx().Config.RootDir), bam.SetPruning(pruningtypes.NewPruningOptionsFromString(val.GetAppConfig().Pruning)), bam.SetMinGasPrices(val.GetAppConfig().MinGasPrices), ) + // app.SetParamStore(mock.NewParamStore(memdb.NewDB())) + return app } return network.TestFixture{ diff --git a/simapp/upgrades.go b/simapp/upgrades.go index 4caab418ac7a..a7c57f055abe 100644 --- a/simapp/upgrades.go +++ b/simapp/upgrades.go @@ -1,11 +1,13 @@ package simapp import ( + "github.com/cosmos/cosmos-sdk/baseapp" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" "github.com/cosmos/cosmos-sdk/x/group" "github.com/cosmos/cosmos-sdk/x/nft" + upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" ) @@ -15,18 +17,13 @@ import ( // when an application is migrating from Cosmos SDK version v0.45.x to v0.46.x. 
const UpgradeName = "v045-to-v046" -func (app SimApp) RegisterUpgradeHandlers() { - app.UpgradeKeeper.SetUpgradeHandler(UpgradeName, - func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { - return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM) - }) - - upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk() +func GetUpgradeStoreOption(keeper upgradekeeper.Keeper) baseapp.StoreOption { + upgradeInfo, err := keeper.ReadUpgradeInfoFromDisk() if err != nil { panic(err) } - if upgradeInfo.Name == UpgradeName && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { + if upgradeInfo.Name == UpgradeName && !keeper.IsSkipHeight(upgradeInfo.Height) { storeUpgrades := storetypes.StoreUpgrades{ Added: []string{ group.ModuleName, @@ -35,6 +32,14 @@ func (app SimApp) RegisterUpgradeHandlers() { } // configure store loader that checks if version == upgradeHeight and applies store upgrades - app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades)) + return upgradetypes.UpgradeStoreOption(uint64(upgradeInfo.Height), &storeUpgrades) } + return nil +} + +func (app SimApp) RegisterUpgradeHandlers() { + app.UpgradeKeeper.SetUpgradeHandler(UpgradeName, + func(ctx sdk.Context, plan upgradetypes.Plan, fromVM module.VersionMap) (module.VersionMap, error) { + return app.ModuleManager.RunMigrations(ctx, app.Configurator(), fromVM) + }) } diff --git a/simapp/utils.go b/simapp/utils.go index 4ab543464121..5c87395e9a94 100644 --- a/simapp/utils.go +++ b/simapp/utils.go @@ -6,9 +6,9 @@ import ( "os" "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" + db "github.com/cosmos/cosmos-sdk/db" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/kv" @@ -18,8 +18,8 @@ import ( // SetupSimulation creates the config, db (levelDB), temporary directory and logger for // the simulation tests. If `FlagEnabledValue` is false it skips the current test. -// Returns error on an invalid db intantiation or temp dir creation. -func SetupSimulation(dirPrefix, dbName string) (simtypes.Config, dbm.DB, string, log.Logger, bool, error) { +// Returns error on an invalid db instantiation or temp dir creation. +func SetupSimulation(dirPrefix, dbName string) (simtypes.Config, db.Connection, string, log.Logger, bool, error) { if !FlagEnabledValue { return simtypes.Config{}, nil, "", nil, true, nil } @@ -39,7 +39,7 @@ func SetupSimulation(dirPrefix, dbName string) (simtypes.Config, dbm.DB, string, return simtypes.Config{}, nil, "", nil, false, err } - db, err := dbm.NewDB(dbName, dbm.BackendType(config.DBBackend), dir) + db, err := db.NewDB("sim_db", db.BadgerDBBackend, dir) if err != nil { return simtypes.Config{}, nil, "", nil, false, err } @@ -104,10 +104,10 @@ func CheckExportSimulation( } // PrintStats prints the corresponding statistics from the app DB. 
-func PrintStats(db dbm.DB) { - fmt.Println("\nLevelDB Stats") - fmt.Println(db.Stats()["leveldb.stats"]) - fmt.Println("LevelDB cached block size", db.Stats()["leveldb.cachedblock"]) +// TODO: implement stats collection for DBConnection +func PrintStats(db db.Connection) { + // stats := db.Stats() + fmt.Println("\nDB Stats: not available") } // GetSimulationLog unmarshals the KVPair's Value to the corresponding type based on the diff --git a/snapshots/helpers_test.go b/snapshots/helpers_test.go index 24051a17a927..34d4fa0b0a59 100644 --- a/snapshots/helpers_test.go +++ b/snapshots/helpers_test.go @@ -13,8 +13,8 @@ import ( protoio "github.com/gogo/protobuf/io" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - db "github.com/tendermint/tm-db" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/testutil" @@ -161,7 +161,7 @@ func (m *mockSnapshotter) SetSnapshotInterval(snapshotInterval uint64) { // setupBusyManager creates a manager with an empty store that is busy creating a snapshot at height 1. // The snapshot will complete when the returned closer is called. func setupBusyManager(t *testing.T) *snapshots.Manager { - store, err := snapshots.NewStore(db.NewMemDB(), testutil.GetTempDir(t)) + store, err := snapshots.NewStore(memdb.NewDB(), testutil.GetTempDir(t)) require.NoError(t, err) hung := newHungSnapshotter() hung.SetSnapshotInterval(opts.Interval) diff --git a/snapshots/store.go b/snapshots/store.go index 8105938b80f1..dbed75584714 100644 --- a/snapshots/store.go +++ b/snapshots/store.go @@ -11,8 +11,8 @@ import ( "sync" "github.com/gogo/protobuf/proto" - db "github.com/tendermint/tm-db" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/snapshots/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" ) @@ -24,7 +24,7 @@ const ( // Store is a snapshot store, containing snapshot metadata and binary chunks. type Store struct { - db db.DB + db dbm.Connection dir string mtx sync.Mutex @@ -32,7 +32,7 @@ type Store struct { } // NewStore creates a new snapshot store. -func NewStore(db db.DB, dir string) (*Store, error) { +func NewStore(db dbm.Connection, dir string) (*Store, error) { if dir == "" { return nil, sdkerrors.Wrap(sdkerrors.ErrLogic, "snapshot directory not given") } @@ -57,11 +57,18 @@ func (s *Store) Delete(height uint64, format uint32) error { return sdkerrors.Wrapf(sdkerrors.ErrConflict, "snapshot for height %v format %v is currently being saved", height, format) } - err := s.db.DeleteSync(encodeKey(height, format)) + dbw := s.db.Writer() + defer dbw.Discard() + err := dbw.Delete(encodeKey(height, format)) if err != nil { return sdkerrors.Wrapf(err, "failed to delete snapshot for height %v format %v", height, format) } + err = dbw.Commit() + if err != nil { + return sdkerrors.Wrapf(err, "failed to commit snapshot deletion for height %v format %v", + height, format) + } err = os.RemoveAll(s.pathSnapshot(height, format)) return sdkerrors.Wrapf(err, "failed to delete snapshot chunks for height %v format %v", height, format) @@ -69,7 +76,9 @@ func (s *Store) Delete(height uint64, format uint32) error { // Get fetches snapshot info from the database. 
func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) { - bytes, err := s.db.Get(encodeKey(height, format)) + dbr := s.db.Reader() + defer dbr.Discard() + bytes, err := dbr.Get(encodeKey(height, format)) if err != nil { return nil, sdkerrors.Wrapf(err, "failed to fetch snapshot metadata for height %v format %v", height, format) @@ -91,14 +100,16 @@ func (s *Store) Get(height uint64, format uint32) (*types.Snapshot, error) { // Get fetches the latest snapshot from the database, if any. func (s *Store) GetLatest() (*types.Snapshot, error) { - iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) + dbr := s.db.Reader() + defer dbr.Discard() + iter, err := dbr.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) if err != nil { return nil, sdkerrors.Wrap(err, "failed to find latest snapshot") } defer iter.Close() var snapshot *types.Snapshot - if iter.Valid() { + if iter.Next() { snapshot = &types.Snapshot{} err := proto.Unmarshal(iter.Value(), snapshot) if err != nil { @@ -111,14 +122,16 @@ func (s *Store) GetLatest() (*types.Snapshot, error) { // List lists snapshots, in reverse order (newest first). func (s *Store) List() ([]*types.Snapshot, error) { - iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) + dbr := s.db.Reader() + defer dbr.Discard() + iter, err := dbr.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) if err != nil { return nil, sdkerrors.Wrap(err, "failed to list snapshots") } defer iter.Close() snapshots := make([]*types.Snapshot, 0) - for ; iter.Valid(); iter.Next() { + for iter.Next() { snapshot := &types.Snapshot{} err := proto.Unmarshal(iter.Value(), snapshot) if err != nil { @@ -181,7 +194,9 @@ func (s *Store) loadChunkFile(height uint64, format uint32, chunk uint32) (io.Re // Prune removes old snapshots. The given number of most recent heights (regardless of format) are retained. 
func (s *Store) Prune(retain uint32) (uint64, error) { - iter, err := s.db.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) + dbr := s.db.Reader() + defer dbr.Discard() + iter, err := dbr.ReverseIterator(encodeKey(0, 0), encodeKey(uint64(math.MaxUint64), math.MaxUint32)) if err != nil { return 0, sdkerrors.Wrap(err, "failed to prune snapshots") } @@ -190,7 +205,7 @@ func (s *Store) Prune(retain uint32) (uint64, error) { pruned := uint64(0) prunedHeights := make(map[uint64]bool) skip := make(map[uint64]bool) - for ; iter.Valid(); iter.Next() { + for iter.Next() { height, format, err := decodeKey(iter.Key()) if err != nil { return 0, sdkerrors.Wrap(err, "failed to prune snapshots") @@ -242,7 +257,9 @@ func (s *Store) Save( s.mtx.Unlock() }() - exists, err := s.db.Has(encodeKey(height, format)) + dbr := s.db.Reader() + defer dbr.Discard() + exists, err := dbr.Has(encodeKey(height, format)) if err != nil { return nil, err } @@ -250,6 +267,7 @@ func (s *Store) Save( return nil, sdkerrors.Wrapf(sdkerrors.ErrConflict, "snapshot already exists for height %v format %v", height, format) } + dbr.Discard() snapshot := &types.Snapshot{ Height: height, @@ -299,7 +317,13 @@ func (s *Store) saveSnapshot(snapshot *types.Snapshot) error { if err != nil { return sdkerrors.Wrap(err, "failed to encode snapshot metadata") } - err = s.db.SetSync(encodeKey(snapshot.Height, snapshot.Format), value) + dbw := s.db.Writer() + defer dbw.Discard() + err = dbw.Set(encodeKey(snapshot.Height, snapshot.Format), value) + if err != nil { + return sdkerrors.Wrap(err, "failed to store snapshot") + } + err = dbw.Commit() return sdkerrors.Wrap(err, "failed to store snapshot") } diff --git a/snapshots/store_test.go b/snapshots/store_test.go index b2a8354bb2b9..bca46300e563 100644 --- a/snapshots/store_test.go +++ b/snapshots/store_test.go @@ -10,15 +10,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - db "github.com/tendermint/tm-db" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/snapshots" "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/testutil" ) func setupStore(t *testing.T) *snapshots.Store { - store, err := snapshots.NewStore(db.NewMemDB(), testutil.GetTempDir(t)) + store, err := snapshots.NewStore(memdb.NewDB(), testutil.GetTempDir(t)) require.NoError(t, err) _, err = store.Save(1, 1, makeChunks([][]byte{ @@ -43,20 +43,20 @@ func setupStore(t *testing.T) *snapshots.Store { func TestNewStore(t *testing.T) { tempdir := t.TempDir() - _, err := snapshots.NewStore(db.NewMemDB(), tempdir) + _, err := snapshots.NewStore(memdb.NewDB(), tempdir) require.NoError(t, err) } func TestNewStore_ErrNoDir(t *testing.T) { - _, err := snapshots.NewStore(db.NewMemDB(), "") + _, err := snapshots.NewStore(memdb.NewDB(), "") require.Error(t, err) } func TestNewStore_ErrDirFailure(t *testing.T) { notADir := filepath.Join(testutil.TempFile(t).Name(), "subdir") - _, err := snapshots.NewStore(db.NewMemDB(), notADir) + _, err := snapshots.NewStore(memdb.NewDB(), notADir) require.Error(t, err) } diff --git a/store/dbadapter/store.go b/store/dbadapter/store.go index 2f0ceb5df54a..815c1f12ec1c 100644 --- a/store/dbadapter/store.go +++ b/store/dbadapter/store.go @@ -36,7 +36,7 @@ func (dsa Store) Has(key []byte) bool { return ok } -// Set wraps the underlying DB's Set method panicing on error. +// Set wraps the underlying DB's Set method panicking on error. 
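Editor's note on the snapshot store hunks above: direct `Get`/`SetSync` calls are replaced with explicit transactions on the new `db.Connection` — reads go through `Reader()` and are always discarded, writes go through `Writer()` and must be committed, and iterators are advanced with `Next()` instead of `Valid()`. A minimal sketch of that pattern; the package name and key encoding below are illustrative stand-ins, not the store's actual internals:

```go
package snapshotmeta

import (
	"encoding/binary"

	dbm "github.com/cosmos/cosmos-sdk/db"
)

// encodeKey is an illustrative stand-in for the snapshot store's internal key layout.
func encodeKey(height uint64, format uint32) []byte {
	k := make([]byte, 12)
	binary.BigEndian.PutUint64(k[:8], height)
	binary.BigEndian.PutUint32(k[8:], format)
	return k
}

// getMeta mirrors the read pattern: open a Reader view, defer Discard, Get by key.
func getMeta(conn dbm.Connection, height uint64, format uint32) ([]byte, error) {
	dbr := conn.Reader()
	defer dbr.Discard()
	return dbr.Get(encodeKey(height, format))
}

// setMeta mirrors the write pattern: Set inside a Writer transaction, then Commit.
func setMeta(conn dbm.Connection, height uint64, format uint32, value []byte) error {
	dbw := conn.Writer()
	defer dbw.Discard()
	if err := dbw.Set(encodeKey(height, format), value); err != nil {
		return err
	}
	return dbw.Commit()
}
```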
func (dsa Store) Set(key, value []byte) { types.AssertValidKey(key) if err := dsa.DB.Set(key, value); err != nil { @@ -44,14 +44,14 @@ func (dsa Store) Set(key, value []byte) { } } -// Delete wraps the underlying DB's Delete method panicing on error. +// Delete wraps the underlying DB's Delete method panicking on error. func (dsa Store) Delete(key []byte) { if err := dsa.DB.Delete(key); err != nil { panic(err) } } -// Iterator wraps the underlying DB's Iterator method panicing on error. +// Iterator wraps the underlying DB's Iterator method panicking on error. func (dsa Store) Iterator(start, end []byte) types.Iterator { iter, err := dsa.DB.Iterator(start, end) if err != nil { @@ -61,7 +61,7 @@ func (dsa Store) Iterator(start, end []byte) types.Iterator { return iter } -// ReverseIterator wraps the underlying DB's ReverseIterator method panicing on error. +// ReverseIterator wraps the underlying DB's ReverseIterator method panicking on error. func (dsa Store) ReverseIterator(start, end []byte) types.Iterator { iter, err := dsa.DB.ReverseIterator(start, end) if err != nil { diff --git a/store/reexport.go b/store/reexport.go index 5b101b4ac30f..6ad1e84a6a34 100644 --- a/store/reexport.go +++ b/store/reexport.go @@ -1,30 +1,31 @@ package store import ( - "github.com/cosmos/cosmos-sdk/store/types" + v1 "github.com/cosmos/cosmos-sdk/store/types" + v2 "github.com/cosmos/cosmos-sdk/store/v2alpha1" ) // Import cosmos-sdk/types/store.go for convenience. type ( - Store = types.Store - Committer = types.Committer - CommitStore = types.CommitStore - MultiStore = types.MultiStore - CacheMultiStore = types.CacheMultiStore - CommitMultiStore = types.CommitMultiStore - KVStore = types.KVStore - KVPair = types.KVPair - Iterator = types.Iterator - CacheKVStore = types.CacheKVStore - CommitKVStore = types.CommitKVStore - CacheWrapper = types.CacheWrapper - CacheWrap = types.CacheWrap - CommitID = types.CommitID - Key = types.StoreKey - Type = types.StoreType - Queryable = types.Queryable - TraceContext = types.TraceContext - Gas = types.Gas - GasMeter = types.GasMeter - GasConfig = types.GasConfig + Store = v1.Store + Committer = v1.Committer + CommitStore = v1.CommitStore + MultiStore = v2.MultiStore + CacheMultiStore = v2.CacheMultiStore + CommitMultiStore = v2.CommitMultiStore + KVStore = v1.KVStore + KVPair = v1.KVPair + Iterator = v1.Iterator + CacheKVStore = v1.CacheKVStore + CommitKVStore = v1.CommitKVStore + CacheWrapper = v1.CacheWrapper + CacheWrap = v1.CacheWrap + CommitID = v1.CommitID + Key = v1.StoreKey + Type = v1.StoreType + Queryable = v1.Queryable + TraceContext = v1.TraceContext + Gas = v1.Gas + GasMeter = v1.GasMeter + GasConfig = v1.GasConfig ) diff --git a/store/rootmulti/store.go b/store/rootmulti/store.go index f44b53d50969..3824b904a413 100644 --- a/store/rootmulti/store.go +++ b/store/rootmulti/store.go @@ -79,7 +79,7 @@ func NewStore(db dbm.DB, logger log.Logger) *Store { keysByName: make(map[string]types.StoreKey), listeners: make(map[types.StoreKey][]types.WriteListener), removalMap: make(map[types.StoreKey]bool), - pruningManager: pruning.NewManager(db, logger), + pruningManager: pruning.NewManager(), } } @@ -320,7 +320,7 @@ func moveKVStoreData(oldDB types.KVStore, newDB types.KVStore) error { // If other strategy, this height is persisted until it is // less than - KeepRecent and % Interval == 0 func (rs *Store) PruneSnapshotHeight(height int64) { - rs.pruningManager.HandleHeightSnapshot(height) + rs.pruningManager.HandleHeightSnapshot(height, rs.db) } // SetInterBlockCache 
sets the Store's internal inter-block (persistent) cache. @@ -533,7 +533,7 @@ func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { } func (rs *Store) handlePruning(version int64) error { - rs.pruningManager.HandleHeight(version - 1) // we should never prune the current version. + rs.pruningManager.HandleHeight(version-1, rs.db) // we should never prune the current version. if !rs.pruningManager.ShouldPruneAtHeight(version) { return nil } @@ -543,7 +543,7 @@ func (rs *Store) handlePruning(version int64) error { } func (rs *Store) pruneStores() error { - pruningHeights, err := rs.pruningManager.GetFlushAndResetPruningHeights() + pruningHeights, err := rs.pruningManager.GetFlushAndResetPruningHeights(rs.db) if err != nil { return err } @@ -942,7 +942,7 @@ func (rs *Store) RollbackToVersion(target int64) int64 { return current } for ; current > target; current-- { - rs.pruningManager.HandleHeight(current) + rs.pruningManager.HandleHeight(current, rs.db) } if err := rs.pruneStores(); err != nil { panic(err) diff --git a/store/rootmulti/store_test.go b/store/rootmulti/store_test.go index ccd58b153feb..e24f5de43450 100644 --- a/store/rootmulti/store_test.go +++ b/store/rootmulti/store_test.go @@ -546,7 +546,7 @@ func TestMultiStore_Pruning_SameHeightsTwice(t *testing.T) { require.NoError(t, err) // Ensure already pruned heights were loaded - heights, err := ms.pruningManager.GetFlushAndResetPruningHeights() + heights, err := ms.pruningManager.GetFlushAndResetPruningHeights(ms.db) require.NoError(t, err) require.Equal(t, expectedHeights, heights) @@ -579,7 +579,7 @@ func TestMultiStore_PruningRestart(t *testing.T) { err := ms.pruningManager.LoadPruningHeights(ms.db) require.NoError(t, err) - actualHeightsToPrune, err := ms.pruningManager.GetFlushAndResetPruningHeights() + actualHeightsToPrune, err := ms.pruningManager.GetFlushAndResetPruningHeights(ms.db) require.NoError(t, err) require.Equal(t, len(pruneHeights), len(actualHeightsToPrune)) require.Equal(t, pruneHeights, actualHeightsToPrune) @@ -590,14 +590,14 @@ func TestMultiStore_PruningRestart(t *testing.T) { err = ms.LoadLatestVersion() require.NoError(t, err) - actualHeightsToPrune, err = ms.pruningManager.GetFlushAndResetPruningHeights() + actualHeightsToPrune, err = ms.pruningManager.GetFlushAndResetPruningHeights(ms.db) require.NoError(t, err) require.Equal(t, pruneHeights, actualHeightsToPrune) // commit one more block and ensure the heights have been pruned ms.Commit() - actualHeightsToPrune, err = ms.pruningManager.GetFlushAndResetPruningHeights() + actualHeightsToPrune, err = ms.pruningManager.GetFlushAndResetPruningHeights(ms.db) require.NoError(t, err) require.Empty(t, actualHeightsToPrune) diff --git a/store/store.go b/store/store.go index 492bd4fee1dc..a84899313210 100644 --- a/store/store.go +++ b/store/store.go @@ -1,16 +1,19 @@ package store import ( - "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/store/cache" - "github.com/cosmos/cosmos-sdk/store/rootmulti" - "github.com/cosmos/cosmos-sdk/store/types" + types "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" ) -func NewCommitMultiStore(db dbm.DB) types.CommitMultiStore { - return rootmulti.NewStore(db, log.NewNopLogger()) +func NewCommitMultiStore(db dbm.Connection) types.CommitMultiStore { + store, err := multi.NewV1MultiStoreAsV2(db, multi.DefaultStoreParams()) + if err != nil { + panic(err) + } + return store } 
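Editor's note on the rewritten `store.NewCommitMultiStore` above: callers now hand in a `db.Connection` rather than a tm-db `DB`, and the server side (server/util.go earlier in this diff) opens such a connection with BadgerDB as the new default backend. A rough sketch of that wiring, with an illustrative data directory:

```go
package main

import (
	"path/filepath"

	dbm "github.com/cosmos/cosmos-sdk/db"
	"github.com/cosmos/cosmos-sdk/store"
)

func main() {
	// Open the application database as a db.Connection (BadgerDB is the new default backend).
	dataDir := filepath.Join("/tmp/simd-home", "data") // illustrative path
	db, err := dbm.NewDB("application", dbm.BadgerDBBackend, dataDir)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// NewCommitMultiStore wraps the connection in the v2 multistore
	// (multi.NewV1MultiStoreAsV2 with DefaultStoreParams) and panics on error.
	cms := store.NewCommitMultiStore(db)
	_ = cms
}
```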
func NewCommitKVStoreCacheManager() types.MultiStorePersistentCache { diff --git a/store/streaming/constructor_test.go b/store/streaming/constructor_test.go index 5edaeccd4eac..8fdca2467881 100644 --- a/store/streaming/constructor_test.go +++ b/store/streaming/constructor_test.go @@ -6,6 +6,7 @@ import ( "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" codecTypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/db/memdb" serverTypes "github.com/cosmos/cosmos-sdk/server/types" "github.com/cosmos/cosmos-sdk/simapp" "github.com/cosmos/cosmos-sdk/store/streaming" @@ -13,8 +14,8 @@ import ( "github.com/cosmos/cosmos-sdk/store/types" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" - dbm "github.com/tendermint/tm-db" "github.com/stretchr/testify/require" ) @@ -50,7 +51,7 @@ func TestStreamingServiceConstructor(t *testing.T) { } func TestLoadStreamingServices(t *testing.T) { - db := dbm.NewMemDB() + db := memdb.NewDB() encCdc := simapp.MakeTestEncodingConfig() keys := sdk.NewKVStoreKeys("mockKey1", "mockKey2") bApp := baseapp.NewBaseApp("appName", log.NewNopLogger(), db, nil) diff --git a/store/v2alpha1/dbadapter/store.go b/store/v2alpha1/dbadapter/store.go index 927cc1f93ae4..451482d80f37 100644 --- a/store/v2alpha1/dbadapter/store.go +++ b/store/v2alpha1/dbadapter/store.go @@ -18,7 +18,7 @@ type Store struct { DB dbm.ReadWriter } -// Get wraps the underlying DB's Get method panicing on error. +// Get wraps the underlying DB's Get method, panicking on error. func (dsa Store) Get(key []byte) []byte { v, err := dsa.DB.Get(key) if err != nil { @@ -28,7 +28,7 @@ func (dsa Store) Get(key []byte) []byte { return v } -// Has wraps the underlying DB's Has method panicing on error. +// Has wraps the underlying DB's Has method, panicking on error. func (dsa Store) Has(key []byte) bool { ok, err := dsa.DB.Has(key) if err != nil { @@ -38,7 +38,7 @@ func (dsa Store) Has(key []byte) bool { return ok } -// Set wraps the underlying DB's Set method panicing on error. +// Set wraps the underlying DB's Set method, panicking on error. func (dsa Store) Set(key, value []byte) { types.AssertValidKey(key) if err := dsa.DB.Set(key, value); err != nil { @@ -46,14 +46,14 @@ func (dsa Store) Set(key, value []byte) { } } -// Delete wraps the underlying DB's Delete method panicing on error. +// Delete wraps the underlying DB's Delete method, panicking on error. func (dsa Store) Delete(key []byte) { if err := dsa.DB.Delete(key); err != nil { panic(err) } } -// Iterator wraps the underlying DB's Iterator method panicing on error. +// Iterator wraps the underlying DB's Iterator method, panicking on error. func (dsa Store) Iterator(start, end []byte) types.Iterator { iter, err := dsa.DB.Iterator(start, end) if err != nil { @@ -62,7 +62,7 @@ func (dsa Store) Iterator(start, end []byte) types.Iterator { return dbutil.ToStoreIterator(iter) } -// ReverseIterator wraps the underlying DB's ReverseIterator method panicing on error. +// ReverseIterator wraps the underlying DB's ReverseIterator method, panicking on error. 
func (dsa Store) ReverseIterator(start, end []byte) types.Iterator { iter, err := dsa.DB.ReverseIterator(start, end) if err != nil { diff --git a/store/v2alpha1/multi/cache_store.go b/store/v2alpha1/multi/cache_store.go index 5d7ee786da43..390deca28361 100644 --- a/store/v2alpha1/multi/cache_store.go +++ b/store/v2alpha1/multi/cache_store.go @@ -5,7 +5,22 @@ import ( types "github.com/cosmos/cosmos-sdk/store/v2alpha1" ) -// GetKVStore implements BasicMultiStore. +// Branched state +type cacheStore struct { + source types.MultiStore + substores map[string]types.CacheKVStore + *traceListenMixin +} + +func newCacheStore(bs types.MultiStore) *cacheStore { + return &cacheStore{ + source: bs, + substores: map[string]types.CacheKVStore{}, + traceListenMixin: newTraceListenMixin(), + } +} + +// GetKVStore implements MultiStore. func (cs *cacheStore) GetKVStore(skey types.StoreKey) types.KVStore { key := skey.Name() sub, has := cs.substores[key] @@ -18,6 +33,10 @@ func (cs *cacheStore) GetKVStore(skey types.StoreKey) types.KVStore { return cs.wrapTraceListen(sub, skey) } +func (cs *cacheStore) HasKVStore(skey types.StoreKey) bool { + return cs.source.HasKVStore(skey) +} + // Write implements CacheMultiStore. func (cs *cacheStore) Write() { for _, sub := range cs.substores { @@ -25,12 +44,20 @@ func (cs *cacheStore) Write() { } } -// CacheMultiStore implements BasicMultiStore. +// CacheMultiStore implements MultiStore. // This recursively wraps the CacheMultiStore in another cache store. -func (cs *cacheStore) CacheMultiStore() types.CacheMultiStore { - return &cacheStore{ - source: cs, - substores: map[string]types.CacheKVStore{}, - traceListenMixin: newTraceListenMixin(), - } +func (cs *cacheStore) CacheWrap() types.CacheMultiStore { + return newCacheStore(cs) +} + +// A non-writable cache for interface wiring purposes +type noopCacheStore struct { + types.CacheMultiStore +} + +func (noopCacheStore) Write() {} + +// pretend commit store is cache store +func CommitAsCacheStore(s types.CommitMultiStore) types.CacheMultiStore { + return noopCacheStore{newCacheStore(s)} } diff --git a/store/v2alpha1/multi/compat.go b/store/v2alpha1/multi/compat.go new file mode 100644 index 000000000000..07f0088a7d5d --- /dev/null +++ b/store/v2alpha1/multi/compat.go @@ -0,0 +1,152 @@ +package multi + +import ( + "fmt" + "io" + + tmdb "github.com/tendermint/tm-db" + + v1 "github.com/cosmos/cosmos-sdk/store/types" + v2 "github.com/cosmos/cosmos-sdk/store/v2alpha1" +) + +var ( + _ v1.CommitMultiStore = (*compatStore)(nil) + _ v1.Queryable = (*compatStore)(nil) + _ v1.CacheMultiStore = (*compatCacheStore)(nil) +) + +type compatStore struct { + *Store +} + +type compatCacheStore struct { + *cacheStore +} + +func WrapStoreAsV1CommitMultiStore(s v2.CommitMultiStore) (v1.CommitMultiStore, error) { + impl, ok := s.(*Store) + if !ok { + return nil, fmt.Errorf("cannot wrap as v1.CommitMultiStore: %T", s) + } + return &compatStore{impl}, nil +} + +func WrapCacheStoreAsV1CacheMultiStore(cs v2.CacheMultiStore) (v1.CacheMultiStore, error) { + impl, ok := cs.(*cacheStore) + if !ok { + return nil, fmt.Errorf("cannot wrap as v1.CacheMultiStore: %T", cs) + } + return &compatCacheStore{impl}, nil +} + +// commit store + +func (st *compatStore) GetStoreType() v1.StoreType { + return v1.StoreTypeMulti +} + +func (st *compatStore) CacheWrap() v1.CacheWrap { + return st.CacheMultiStore() +} + +// TODO: v1 MultiStore ignores args, do we as well? 
+func (st *compatStore) CacheWrapWithTrace(io.Writer, v1.TraceContext) v1.CacheWrap { + return st.CacheWrap() +} +func (st *compatStore) CacheWrapWithListeners(v1.StoreKey, []v1.WriteListener) v1.CacheWrap { + return st.CacheWrap() +} + +func (st *compatStore) CacheMultiStore() v1.CacheMultiStore { + return &compatCacheStore{newCacheStore(st.Store)} +} +func (st *compatStore) CacheMultiStoreWithVersion(version int64) (v1.CacheMultiStore, error) { + view, err := st.GetVersion(version) + if err != nil { + return nil, err + } + return &compatCacheStore{newCacheStore(view)}, nil +} + +func (st *compatStore) GetStore(k v1.StoreKey) v1.Store { + return st.GetKVStore(k) +} + +func (st *compatStore) GetCommitStore(key v1.StoreKey) v1.CommitStore { + panic("unsupported: GetCommitStore") +} +func (st *compatStore) GetCommitKVStore(key v1.StoreKey) v1.CommitKVStore { + panic("unsupported: GetCommitKVStore") +} + +func (st *compatStore) SetTracer(w io.Writer) v1.MultiStore { + st.Store.SetTracer(w) + return st +} +func (st *compatStore) SetTracingContext(tc v1.TraceContext) v1.MultiStore { + st.Store.SetTracingContext(tc) + return st +} + +func (st *compatStore) MountStoreWithDB(key v1.StoreKey, typ v1.StoreType, db tmdb.DB) { + panic("unsupported: MountStoreWithDB") +} + +func (st *compatStore) LoadLatestVersion() error { + return nil // this store is always at the latest version +} +func (st *compatStore) LoadLatestVersionAndUpgrade(upgrades *v1.StoreUpgrades) error { + panic("unsupported: LoadLatestVersionAndUpgrade") +} +func (st *compatStore) LoadVersionAndUpgrade(ver int64, upgrades *v1.StoreUpgrades) error { + panic("unsupported: LoadVersionAndUpgrade") +} + +func (st *compatStore) LoadVersion(ver int64) error { + // TODO: could cache a viewStore representing "current" version + panic("unsupported: LoadVersion") +} + +func (st *compatStore) SetInterBlockCache(v1.MultiStorePersistentCache) { + panic("unsupported: SetInterBlockCache") +} +func (st *compatStore) SetInitialVersion(version int64) error { + if version < 0 { + return fmt.Errorf("invalid version") + } + return st.Store.SetInitialVersion(uint64(version)) +} +func (st *compatStore) SetIAVLCacheSize(size int) { + panic("unsupported: SetIAVLCacheSize") +} + +// cache store + +func (cs *compatCacheStore) GetStoreType() v1.StoreType { return v1.StoreTypeMulti } +func (cs *compatCacheStore) CacheWrap() v1.CacheWrap { + return cs.CacheMultiStore() +} +func (cs *compatCacheStore) CacheWrapWithTrace(w io.Writer, tc v1.TraceContext) v1.CacheWrap { + return cs.CacheWrap() +} +func (cs *compatCacheStore) CacheWrapWithListeners(storeKey v1.StoreKey, listeners []v1.WriteListener) v1.CacheWrap { + return cs.CacheWrap() +} +func (cs *compatCacheStore) CacheMultiStore() v1.CacheMultiStore { + return &compatCacheStore{newCacheStore(cs.cacheStore)} +} +func (cs *compatCacheStore) CacheMultiStoreWithVersion(int64) (v1.CacheMultiStore, error) { + return nil, fmt.Errorf("cannot branch cached multi-store with a version") +} + +func (cs *compatCacheStore) GetStore(k v1.StoreKey) v1.Store { return cs.GetKVStore(k) } + +func (cs *compatCacheStore) SetTracer(w io.Writer) v1.MultiStore { + cs.cacheStore.SetTracer(w) + return cs +} +func (cs *compatCacheStore) SetTracingContext(tc v1.TraceContext) v1.MultiStore { + cs.cacheStore.SetTracingContext(tc) + return cs +} diff --git a/store/v2alpha1/multi/migration.go b/store/v2alpha1/multi/migration.go index caaddbf45283..f3bd8576f516 100644 --- a/store/v2alpha1/multi/migration.go +++
b/store/v2alpha1/multi/migration.go @@ -11,7 +11,7 @@ import ( ) // MigrateFromV1 will migrate the state from iavl to smt -func MigrateFromV1(rootMultiStore *v1Store.Store, store2db dbm.Connection, storeConfig StoreConfig) (*Store, error) { +func MigrateFromV1(rootMultiStore *v1Store.Store, store2db dbm.Connection, storeConfig StoreParams) (*Store, error) { type namedStore struct { *iavl.Store name string @@ -21,7 +21,7 @@ func MigrateFromV1(rootMultiStore *v1Store.Store, store2db dbm.Connection, store keyName := storeKey.Name() switch store := rootMultiStore.GetStoreByName(keyName).(type) { case *iavl.Store: - err := storeConfig.RegisterSubstore(keyName, types.StoreTypePersistent) + err := storeConfig.RegisterSubstore(storeKey, types.StoreTypePersistent) if err != nil { return nil, err } diff --git a/store/v2alpha1/multi/migration_test.go b/store/v2alpha1/multi/migration_test.go index bf7b0f921cd0..d723c5433b15 100644 --- a/store/v2alpha1/multi/migration_test.go +++ b/store/v2alpha1/multi/migration_test.go @@ -71,7 +71,7 @@ func TestMigrationV2(t *testing.T) { // setup a new root store of smt db2 := memdb.NewDB() - storeConfig := DefaultStoreConfig() + storeConfig := DefaultStoreParams() // migrating the iavl store (v1) to smt store (v2) v2Store, err := MigrateFromV1(v1Store, db2, storeConfig) require.NoError(t, err) @@ -99,7 +99,7 @@ func TestMigrateV2ForEmptyStore(t *testing.T) { err := v1Store.LoadLatestVersion() require.Nil(t, err) db2 := memdb.NewDB() - storeConfig := DefaultStoreConfig() + storeConfig := DefaultStoreParams() // migrating the iavl store (v1) to smt store (v2) v2Store, err := MigrateFromV1(v1Store, db2, storeConfig) require.NoError(t, err) diff --git a/store/v2alpha1/multi/params.go b/store/v2alpha1/multi/params.go new file mode 100644 index 000000000000..9d5fe0063237 --- /dev/null +++ b/store/v2alpha1/multi/params.go @@ -0,0 +1,63 @@ +package multi + +import ( + "fmt" + + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" + types "github.com/cosmos/cosmos-sdk/store/v2alpha1" +) + +// DefaultStoreParams returns a MultiStore config with an empty schema, a single backing DB, +// pruning with PruneDefault, no listeners and no tracer. 
+func DefaultStoreParams() StoreParams { + return StoreParams{ + Pruning: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + SchemaBuilder: newSchemaBuilder(), + storeKeys: storeKeys{}, + traceListenMixin: newTraceListenMixin(), + } +} + +func (par *StoreParams) RegisterSubstore(skey types.StoreKey, typ types.StoreType) error { + if !validSubStoreType(typ) { + return fmt.Errorf("StoreType not supported: %v", typ) + } + var ok bool + switch typ { + case types.StoreTypePersistent: + _, ok = skey.(*types.KVStoreKey) + case types.StoreTypeMemory: + _, ok = skey.(*types.MemoryStoreKey) + case types.StoreTypeTransient: + _, ok = skey.(*types.TransientStoreKey) + } + if !ok { + return fmt.Errorf("invalid StoreKey for %v: %T", typ, skey) + } + if err := par.registerName(skey.Name(), typ); err != nil { + return err + } + par.storeKeys[skey.Name()] = skey + return nil +} + +func (par *StoreParams) storeKey(key string) (types.StoreKey, error) { + skey, ok := par.storeKeys[key] + if !ok { + return nil, fmt.Errorf("StoreKey instance not mapped: %s", key) + } + return skey, nil +} + +func RegisterSubstoresFromMap[T types.StoreKey](par *StoreParams, keys map[string]T) error { + for _, key := range keys { + typ, err := types.StoreKeyToType(key) + if err != nil { + return err + } + if err = par.RegisterSubstore(key, typ); err != nil { + return err + } + } + return nil +} diff --git a/store/v2alpha1/multi/proof_test.go b/store/v2alpha1/multi/proof_test.go index 86e9fbfd2ed0..13c69a5d616c 100644 --- a/store/v2alpha1/multi/proof_test.go +++ b/store/v2alpha1/multi/proof_test.go @@ -11,7 +11,7 @@ import ( "github.com/cosmos/cosmos-sdk/store/v2alpha1/smt" ) -// We hash keys produce SMT paths, so reflect that here +// We hash keys to produce SMT paths, so reflect that here func keyPath(prefix, key string) string { hashed := sha256.Sum256([]byte(key)) return prefix + string(hashed[:]) @@ -51,7 +51,7 @@ func TestVerifySMTStoreProof(t *testing.T) { func TestVerifyMultiStoreQueryProof(t *testing.T) { db := memdb.NewDB() - store, err := NewStore(db, simpleStoreConfig(t)) + store, err := NewStore(db, storeParams1(t)) require.NoError(t, err) substore := store.GetKVStore(skey_1) @@ -95,7 +95,7 @@ func TestVerifyMultiStoreQueryProof(t *testing.T) { func TestVerifyMultiStoreQueryProofAbsence(t *testing.T) { db := memdb.NewDB() - store, err := NewStore(db, simpleStoreConfig(t)) + store, err := NewStore(db, storeParams1(t)) require.NoError(t, err) substore := store.GetKVStore(skey_1) diff --git a/store/v2alpha1/multi/snapshot.go b/store/v2alpha1/multi/snapshot.go index 434c5e1c6fc5..864c6c70e294 100644 --- a/store/v2alpha1/multi/snapshot.go +++ b/store/v2alpha1/multi/snapshot.go @@ -107,7 +107,8 @@ func (rs *Store) Restore( } var subStore *substore - storeSchemaReceived := false + var storeSchemaReceived = false + var receivedStoreSchema StoreSchema var snapshotItem snapshottypes.SnapshotItem @@ -123,13 +124,13 @@ loop: switch item := snapshotItem.Item.(type) { case *snapshottypes.SnapshotItem_Schema: - receivedStoreSchema := make(StoreSchema, len(item.Schema.GetKeys())) + receivedStoreSchema = make(StoreSchema, len(item.Schema.GetKeys())) storeSchemaReceived = true for _, sKey := range item.Schema.GetKeys() { receivedStoreSchema[string(sKey)] = types.StoreTypePersistent } - if !rs.schema.equal(receivedStoreSchema) { + if !receivedStoreSchema.matches(rs.schema) { return snapshottypes.SnapshotItem{}, sdkerrors.Wrap(sdkerrors.ErrLogic, "received schema does not match app schema") } @@ -140,7 +141,7 @@ loop: return 
snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "received store name before store schema %s", storeName) } // checking the store schema exists or not - if _, has := rs.schema[storeName]; !has { + if _, has := receivedStoreSchema[storeName]; !has { return snapshottypes.SnapshotItem{}, sdkerrors.Wrapf(sdkerrors.ErrLogic, "store is missing from schema %s", storeName) } diff --git a/store/v2alpha1/multi/snapshot_test.go b/store/v2alpha1/multi/snapshot_test.go index 94ecb890a128..48c8aa0802ef 100644 --- a/store/v2alpha1/multi/snapshot_test.go +++ b/store/v2alpha1/multi/snapshot_test.go @@ -23,13 +23,25 @@ import ( "github.com/cosmos/cosmos-sdk/store/types" ) -func multiStoreConfig(t *testing.T, stores int) StoreConfig { - opts := DefaultStoreConfig() +var testStoreKeys []types.StoreKey + +func makeStoreKeys(upto int) { + if len(testStoreKeys) >= upto { + return + } + for i := len(testStoreKeys); i < upto; i++ { + skey := types.NewKVStoreKey(fmt.Sprintf("store%d", i)) + testStoreKeys = append(testStoreKeys, skey) + } +} + +func multiStoreConfig(t *testing.T, stores int) StoreParams { + opts := DefaultStoreParams() opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) + makeStoreKeys(stores) for i := 0; i < stores; i++ { - sKey := types.NewKVStoreKey(fmt.Sprintf("store%d", i)) - require.NoError(t, opts.RegisterSubstore(sKey.Name(), types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(testStoreKeys[i], types.StoreTypePersistent)) } return opts @@ -43,7 +55,7 @@ func newMultiStoreWithGeneratedData(t *testing.T, db dbm.Connection, stores int, var sKeys []string for sKey := range store.schema { - sKeys = append(sKeys, sKey) + sKeys = append(sKeys, sKey.Name()) } sort.Slice(sKeys, func(i, j int) bool { @@ -74,7 +86,7 @@ func newMultiStoreWithBasicData(t *testing.T, db dbm.Connection, stores int) *St require.NoError(t, err) for sKey := range store.schema { - sStore, err := store.getSubstore(sKey) + sStore, err := store.getSubstore(sKey.Name()) require.NoError(t, err) for k, v := range alohaData { sStore.Set([]byte(k), []byte(v)) @@ -216,9 +228,9 @@ func TestMultistoreSnapshotRestore(t *testing.T) { assert.Equal(t, source.LastCommitID(), target.LastCommitID()) for sKey := range source.schema { - sourceSubStore, err := source.getSubstore(sKey) + sourceSubStore, err := source.getSubstore(sKey.Name()) require.NoError(t, err) - targetSubStore, err := target.getSubstore(sKey) + targetSubStore, err := target.getSubstore(sKey.Name()) require.NoError(t, err) require.Equal(t, sourceSubStore, targetSubStore) } diff --git a/store/v2alpha1/multi/store.go b/store/v2alpha1/multi/store.go index c39b8e0e854c..7a42fd33683d 100644 --- a/store/v2alpha1/multi/store.go +++ b/store/v2alpha1/multi/store.go @@ -1,6 +1,7 @@ package multi import ( + "encoding/binary" "errors" "fmt" "io" @@ -13,6 +14,8 @@ import ( dbm "github.com/cosmos/cosmos-sdk/db" prefixdb "github.com/cosmos/cosmos-sdk/db/prefix" util "github.com/cosmos/cosmos-sdk/internal" + dbutil "github.com/cosmos/cosmos-sdk/internal/db" + "github.com/cosmos/cosmos-sdk/pruning" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" sdkmaps "github.com/cosmos/cosmos-sdk/store/internal/maps" "github.com/cosmos/cosmos-sdk/store/listenkv" @@ -30,11 +33,14 @@ var ( _ types.Queryable = (*Store)(nil) _ types.CommitMultiStore = (*Store)(nil) _ types.CacheMultiStore = (*cacheStore)(nil) - _ types.BasicMultiStore = (*viewStore)(nil) + _ types.MultiStore = (*viewStore)(nil) _ types.KVStore = (*substore)(nil) ) var ( + 
ErrVersionDoesNotExist = errors.New("version does not exist") + ErrMaximumHeight = errors.New("maximum block height reached") + // Root prefixes merkleRootKey = []byte{0} // Key for root hash of namespace tree schemaPrefix = []byte{1} // Prefix for store keys (namespaces) @@ -42,41 +48,44 @@ var ( // Per-substore prefixes substoreMerkleRootKey = []byte{0} // Key for root hashes of Merkle trees - dataPrefix = []byte{1} // Prefix for state mappings - indexPrefix = []byte{2} // Prefix for Store reverse index - smtPrefix = []byte{3} // Prefix for SMT data - - ErrVersionDoesNotExist = errors.New("version does not exist") - ErrMaximumHeight = errors.New("maximum block height reached") + dataPrefix = []byte{1} // Prefix for store data + smtPrefix = []byte{2} // Prefix for tree data ) -func ErrStoreNotFound(skey string) error { - return fmt.Errorf("store does not exist for key: %s", skey) +func ErrStoreNotFound(key string) error { + return fmt.Errorf("store does not exist for key: %s", key) } -// StoreConfig is used to define a schema and other options and pass them to the MultiStore constructor. -type StoreConfig struct { +// StoreParams is used to define a schema and other options and pass them to the MultiStore constructor. +type StoreParams struct { // Version pruning options for backing DBs. Pruning pruningtypes.PruningOptions // The minimum allowed version number. InitialVersion uint64 - // The backing DB to use for the state commitment Merkle tree data. + // The optional backing DB to use for the state commitment Merkle tree data. // If nil, Merkle data is stored in the state storage DB under a separate prefix. StateCommitmentDB dbm.Connection - - prefixRegistry + // Contains the store schema and methods to modify it + SchemaBuilder + storeKeys + // Inter-block persistent cache to use. TODO: not used/impl'd PersistentCache types.MultiStorePersistentCache - Upgrades []types.StoreUpgrades - + // Any pending upgrades to apply on loading. + Upgrades *types.StoreUpgrades + // Contains The trace context and listeners that can also be set from store methods. *traceListenMixin } // StoreSchema defineds a mapping of substore keys to store types type StoreSchema map[string]types.StoreType +type StoreKeySchema map[types.StoreKey]types.StoreType + +// storeKeys maps key names to StoreKey instances +type storeKeys map[string]types.StoreKey // Store is the main persistent store type implementing CommitMultiStore. // Substores consist of an SMT-based state commitment store and state storage. -// Substores must be reserved in the StoreConfig or defined as part of a StoreUpgrade in order to be valid. +// Substores must be reserved in the StoreParams or defined as part of a StoreUpgrade in order to be valid. // Note: // The state commitment data and proof are structured in the same basic pattern as the MultiStore, but use an SMT rather than IAVL tree: // * The state commitment store of each substore consists of a independent SMT. 
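
// Illustrative usage sketch (not taken verbatim from this diff): how an app
// wires up the new StoreParams and opens a v2 MultiStore. It relies only on
// helpers introduced in this change (DefaultStoreParams, RegisterSubstore,
// NewStore); the key names ("bank", "mem", "transient") are invented for the
// example, and the import paths follow the test files in this change — adjust
// them if the surrounding package re-exports these types elsewhere.
package main

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/db/memdb"
	"github.com/cosmos/cosmos-sdk/store/types"
	multi "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi"
)

func main() {
	opts := multi.DefaultStoreParams()

	// Substores are now registered with concrete StoreKey instances; the key
	// type must match the store type or RegisterSubstore returns an error.
	bankKey := types.NewKVStoreKey("bank")
	memKey := types.NewMemoryStoreKey("mem")
	tranKey := types.NewTransientStoreKey("transient")

	if err := opts.RegisterSubstore(bankKey, types.StoreTypePersistent); err != nil {
		panic(err)
	}
	if err := opts.RegisterSubstore(memKey, types.StoreTypeMemory); err != nil {
		panic(err)
	}
	if err := opts.RegisterSubstore(tranKey, types.StoreTypeTransient); err != nil {
		panic(err)
	}

	store, err := multi.NewStore(memdb.NewDB(), opts)
	if err != nil {
		panic(err)
	}
	defer store.Close()

	store.GetKVStore(bankKey).Set([]byte("key"), []byte("value"))
	fmt.Printf("committed version %d\n", store.Commit().Version)
}
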
@@ -87,16 +96,18 @@ type Store struct { StateCommitmentDB dbm.Connection stateCommitmentTxn dbm.ReadWriter - schema StoreSchema - mem *mem.Store - tran *transient.Store - mtx sync.RWMutex + schema StoreKeySchema + + mem *mem.Store + tran *transient.Store + mtx sync.RWMutex - // Copied from StoreConfig - Pruning pruningtypes.PruningOptions - InitialVersion uint64 // if + // Copied from StoreParams + InitialVersion uint64 *traceListenMixin + pruningManager *pruning.Manager + PersistentCache types.MultiStorePersistentCache substoreCache map[string]*substore } @@ -105,60 +116,29 @@ type substore struct { root *Store name string dataBucket dbm.ReadWriter - indexBucket dbm.ReadWriter - stateCommitmentStore *smt.Store -} - -// Branched state -type cacheStore struct { - source types.BasicMultiStore - substores map[string]types.CacheKVStore - *traceListenMixin -} - -// Read-only store for querying past versions -type viewStore struct { - stateView dbm.Reader - stateCommitmentView dbm.Reader - substoreCache map[string]*viewSubstore - schema StoreSchema -} - -type viewSubstore struct { - root *viewStore - name string - dataBucket dbm.Reader - indexBucket dbm.Reader stateCommitmentStore *smt.Store } // Builder type used to create a valid schema with no prefix conflicts -type prefixRegistry struct { +type SchemaBuilder struct { StoreSchema reserved []string } // Mixin type that to compose trace & listen state into each root store variant type type traceListenMixin struct { - listeners map[string][]types.WriteListener - TraceWriter io.Writer - TraceContext types.TraceContext + listeners map[types.StoreKey][]types.WriteListener + TraceWriter io.Writer + TraceContext types.TraceContext + traceContextMutex sync.RWMutex } func newTraceListenMixin() *traceListenMixin { - return &traceListenMixin{listeners: map[string][]types.WriteListener{}} + return &traceListenMixin{listeners: map[types.StoreKey][]types.WriteListener{}} } -// DefaultStoreConfig returns a MultiStore config with an empty schema, a single backing DB, -// pruning with PruneDefault, no listeners and no tracer. -func DefaultStoreConfig() StoreConfig { - return StoreConfig{ - Pruning: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), - prefixRegistry: prefixRegistry{ - StoreSchema: StoreSchema{}, - }, - traceListenMixin: newTraceListenMixin(), - } +func newSchemaBuilder() SchemaBuilder { + return SchemaBuilder{StoreSchema: StoreSchema{}} } // Returns true for valid store types for a MultiStore schema @@ -192,9 +172,25 @@ func (ss StoreSchema) equal(that StoreSchema) bool { return true } +func (this StoreSchema) matches(that StoreKeySchema) bool { + if len(this) != len(that) { + return false + } + for key, val := range that { + myval, has := this[key.Name()] + if !has { + return false + } + if val != myval { + return false + } + } + return true +} + // Parses a schema from the DB -func readSavedSchema(bucket dbm.Reader) (*prefixRegistry, error) { - ret := prefixRegistry{StoreSchema: StoreSchema{}} +func readSavedSchema(bucket dbm.Reader) (*SchemaBuilder, error) { + ret := newSchemaBuilder() it, err := bucket.Iterator(nil, nil) if err != nil { return nil, err @@ -215,7 +211,18 @@ func readSavedSchema(bucket dbm.Reader) (*prefixRegistry, error) { // NewStore constructs a MultiStore directly from a database. // Creates a new store if no data exists; otherwise loads existing data. 
-func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) { +func NewStore(db dbm.Connection, opts StoreParams) (ret *Store, err error) { + pruningManager := pruning.NewManager() + pruningManager.SetOptions(opts.Pruning) + { // load any pruned heights we missed from disk to be pruned on the next run + r := db.Reader() + defer r.Discard() + tmdb := dbutil.ReadWriterAsTmdb(dbm.ReaderAsReadWriter(r)) + if err = pruningManager.LoadPruningHeights(tmdb); err != nil { + return + } + } + versions, err := db.Versions() if err != nil { return @@ -230,8 +237,7 @@ func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) { // To abide by atomicity constraints, revert the DB to the last saved version, in case it contains // committed data in the "working" version. // This should only happen if Store.Commit previously failed. - err = db.Revert() - if err != nil { + if err = db.Revert(); err != nil { return } stateTxn := db.ReadWriter() @@ -243,8 +249,7 @@ func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) { stateCommitmentTxn := stateTxn if opts.StateCommitmentDB != nil { var scVersions dbm.VersionSet - scVersions, err = opts.StateCommitmentDB.Versions() - if err != nil { + if scVersions, err = opts.StateCommitmentDB.Versions(); err != nil { return } // Version sets of each DB must match @@ -252,8 +257,7 @@ func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) { err = fmt.Errorf("different version history between Storage and StateCommitment DB ") return } - err = opts.StateCommitmentDB.Revert() - if err != nil { + if err = opts.StateCommitmentDB.Revert(); err != nil { return } stateCommitmentTxn = opts.StateCommitmentDB.ReadWriter() @@ -266,14 +270,11 @@ func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) { stateCommitmentTxn: stateCommitmentTxn, mem: mem.NewStore(), tran: transient.NewStore(), - - substoreCache: map[string]*substore{}, - - traceListenMixin: opts.traceListenMixin, - PersistentCache: opts.PersistentCache, - - Pruning: opts.Pruning, - InitialVersion: opts.InitialVersion, + substoreCache: map[string]*substore{}, + traceListenMixin: opts.traceListenMixin, + PersistentCache: opts.PersistentCache, + pruningManager: pruningManager, + InitialVersion: opts.InitialVersion, } // Now load the substore schema @@ -284,58 +285,72 @@ func NewStore(db dbm.Connection, opts StoreConfig) (ret *Store, err error) { err = util.CombineErrors(err, ret.Close(), "base.Close also failed") } }() + writeSchema := func(sch StoreSchema) { + schemaWriter := prefixdb.NewWriter(ret.stateTxn, schemaPrefix) + var it dbm.Iterator + if it, err = schemaView.Iterator(nil, nil); err != nil { + return + } + for it.Next() { + err = schemaWriter.Delete(it.Key()) + if err != nil { + return + } + } + if err = it.Close(); err != nil { + return + } + if err = schemaView.Discard(); err != nil { + return + } + // NB. 
the migrated contents and schema are not committed until the next store.Commit + for skey, typ := range sch { + err = schemaWriter.Set([]byte(skey), []byte{byte(typ)}) + if err != nil { + return + } + } + } + reg, err := readSavedSchema(schemaView) if err != nil { return } // If the loaded schema is empty (for new store), just copy the config schema; - // Otherwise, verify it is identical to the config schema + // Otherwise, migrate, then verify it is identical to the config schema if len(reg.StoreSchema) == 0 { - for k, v := range opts.StoreSchema { - reg.StoreSchema[k] = v + writeSchema(opts.StoreSchema) + } else { + // Apply migrations to the schema + if opts.Upgrades != nil { + err = reg.migrateSchema(*opts.Upgrades) + if err != nil { + return + } } - reg.reserved = make([]string, len(opts.reserved)) - copy(reg.reserved, opts.reserved) - } else if !reg.equal(opts.StoreSchema) { - err = errors.New("loaded schema does not match configured schema") - return - } - - // Apply migrations, then clear old schema and write the new one - for _, upgrades := range opts.Upgrades { - err = reg.migrate(ret, upgrades) - if err != nil { + if !reg.equal(opts.StoreSchema) { + err = errors.New("loaded schema does not match configured schema") return } - } - schemaWriter := prefixdb.NewWriter(ret.stateTxn, schemaPrefix) - it, err := schemaView.Iterator(nil, nil) - if err != nil { - return - } - for it.Next() { - err = schemaWriter.Delete(it.Key()) - if err != nil { - return + if opts.Upgrades != nil { + err = migrateData(ret, *opts.Upgrades) + if err != nil { + return + } + writeSchema(opts.StoreSchema) } } - err = it.Close() - if err != nil { - return - } - err = schemaView.Discard() - if err != nil { - return - } - // NB. the migrated contents and schema are not committed until the next store.Commit - for skey, typ := range reg.StoreSchema { - err = schemaWriter.Set([]byte(skey), []byte{byte(typ)}) + ret.schema = StoreKeySchema{} + for key, typ := range opts.StoreSchema { + var skey types.StoreKey + skey, err = opts.storeKey(key) if err != nil { return } + ret.schema[skey] = typ } - ret.schema = reg.StoreSchema - return ret, err + + return } func (s *Store) Close() error { @@ -347,7 +362,7 @@ func (s *Store) Close() error { } // Applies store upgrades to the DB contents. -func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) error { +func migrateData(store *Store, upgrades types.StoreUpgrades) error { // Get a view of current state to allow mutation while iterating reader := store.stateDB.Reader() scReader := reader @@ -356,24 +371,16 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er } for _, key := range upgrades.Deleted { - sst, ix, err := pr.storeInfo(key) - if err != nil { - return err - } - if sst != types.StoreTypePersistent { - return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", key, sst) - } - pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...) 
-		delete(pr.StoreSchema, key)
-
-		pfx := substorePrefix(key)
+		pfx := prefixSubstore(key)
 		subReader := prefixdb.NewReader(reader, pfx)
 		it, err := subReader.Iterator(nil, nil)
 		if err != nil {
 			return err
 		}
 		for it.Next() {
-			store.stateTxn.Delete(it.Key())
+			if err = store.stateTxn.Delete(it.Key()); err != nil {
+				return err
+			}
 		}
 		it.Close()
 		if store.StateCommitmentDB != nil {
@@ -385,26 +392,14 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
 			for it.Next() {
 				store.stateCommitmentTxn.Delete(it.Key())
 			}
-			it.Close()
+			if err = it.Close(); err != nil {
+				return err
+			}
 		}
 	}
 	for _, rename := range upgrades.Renamed {
-		sst, ix, err := pr.storeInfo(rename.OldKey)
-		if err != nil {
-			return err
-		}
-		if sst != types.StoreTypePersistent {
-			return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", rename.OldKey, sst)
-		}
-		pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...)
-		delete(pr.StoreSchema, rename.OldKey)
-		err = pr.RegisterSubstore(rename.NewKey, types.StoreTypePersistent)
-		if err != nil {
-			return err
-		}
-
-		oldPrefix := substorePrefix(rename.OldKey)
-		newPrefix := substorePrefix(rename.NewKey)
+		oldPrefix := prefixSubstore(rename.OldKey)
+		newPrefix := prefixSubstore(rename.NewKey)
 		subReader := prefixdb.NewReader(reader, oldPrefix)
 		subWriter := prefixdb.NewWriter(store.stateTxn, newPrefix)
 		it, err := subReader.Iterator(nil, nil)
@@ -414,7 +409,9 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
 		for it.Next() {
 			subWriter.Set(it.Key(), it.Value())
 		}
-		it.Close()
+		if err = it.Close(); err != nil {
+			return err
+		}
 		if store.StateCommitmentDB != nil {
 			subReader = prefixdb.NewReader(scReader, oldPrefix)
 			subWriter = prefixdb.NewWriter(store.stateCommitmentTxn, newPrefix)
 			it, err = subReader.Iterator(nil, nil)
@@ -425,28 +422,37 @@ func (pr *prefixRegistry) migrate(store *Store, upgrades types.StoreUpgrades) er
 			for it.Next() {
 				subWriter.Set(it.Key(), it.Value())
 			}
-			it.Close()
-		}
-	}
-
-	for _, key := range upgrades.Added {
-		err := pr.RegisterSubstore(key, types.StoreTypePersistent)
-		if err != nil {
-			return err
+			if err = it.Close(); err != nil {
+				return err
+			}
 		}
 	}
 	return nil
 }
 
-func substorePrefix(key string) []byte {
-	return append(contentPrefix, key...)
+// encode key length as varint
+func varintLen(l int) []byte {
+	buf := make([]byte, binary.MaxVarintLen64)
+	n := binary.PutUvarint(buf, uint64(l))
+	return buf[:n]
 }
 
-// GetKVStore implements BasicMultiStore.
+func prefixSubstore(key string) []byte {
+	lv := varintLen(len(key))
+	ret := append(lv, key...)
+	return append(contentPrefix, ret...)
+}
+
+func prefixNonpersistent(key string) []byte {
+	lv := varintLen(len(key))
+	return append(lv, key...)
+}
+
+// GetKVStore implements MultiStore.
 func (s *Store) GetKVStore(skey types.StoreKey) types.KVStore {
 	key := skey.Name()
 	var parent types.KVStore
-	typ, has := s.schema[key]
+	typ, has := s.schema[skey]
 	if !has {
 		panic(ErrStoreNotFound(key))
 	}
@@ -459,9 +465,10 @@ func (s *Store) GetKVStore(skey types.StoreKey) types.KVStore {
 	default:
 		panic(fmt.Errorf("StoreType not supported: %v", typ)) // should never happen
 	}
+
 	var ret types.KVStore
 	if parent != nil { // store is non-persistent
-		ret = prefix.NewStore(parent, []byte(key))
+		ret = prefix.NewStore(parent, prefixNonpersistent(key))
 	} else { // store is persistent
 		sub, err := s.getSubstore(key)
 		if err != nil {
@@ -475,13 +482,19 @@ func (s *Store) GetKVStore(skey types.StoreKey) types.KVStore {
 	return s.wrapTraceListen(ret, skey)
 }
 
+// HasKVStore implements MultiStore.
+func (rs *Store) HasKVStore(skey types.StoreKey) bool { + _, has := rs.schema[skey] + return has +} + // Gets a persistent substore. This reads, but does not update the substore cache. // Use it in cases where we need to access a store internally (e.g. read/write Merkle keys, queries) func (s *Store) getSubstore(key string) (*substore, error) { if cached, has := s.substoreCache[key]; has { return cached, nil } - pfx := substorePrefix(key) + pfx := prefixSubstore(key) stateRW := prefixdb.NewReadWriter(s.stateTxn, pfx) stateCommitmentRW := prefixdb.NewReadWriter(s.stateCommitmentTxn, pfx) var stateCommitmentStore *smt.Store @@ -501,18 +514,16 @@ func (s *Store) getSubstore(key string) (*substore, error) { root: s, name: key, dataBucket: prefixdb.NewReadWriter(stateRW, dataPrefix), - indexBucket: prefixdb.NewReadWriter(stateRW, indexPrefix), stateCommitmentStore: stateCommitmentStore, }, nil } // Resets a substore's state after commit (because root stateTxn has been discarded) func (s *substore) refresh(rootHash []byte) { - pfx := substorePrefix(s.name) + pfx := prefixSubstore(s.name) stateRW := prefixdb.NewReadWriter(s.root.stateTxn, pfx) stateCommitmentRW := prefixdb.NewReadWriter(s.root.stateCommitmentTxn, pfx) s.dataBucket = prefixdb.NewReadWriter(stateRW, dataPrefix) - s.indexBucket = prefixdb.NewReadWriter(stateRW, indexPrefix) s.stateCommitmentStore = loadSMT(stateCommitmentRW, rootHash) } @@ -539,37 +550,67 @@ func (s *Store) Commit() types.CommitID { panic(err) } - // Prune if necessary - previous := cid.Version - 1 - if s.Pruning.Interval != 0 && cid.Version%int64(s.Pruning.Interval) == 0 { - // The range of newly prunable versions - lastPrunable := previous - int64(s.Pruning.KeepRecent) - firstPrunable := lastPrunable - int64(s.Pruning.Interval) + if err = s.handlePruning(cid.Version); err != nil { + panic(err) + } + + s.tran.Commit() + return *cid +} - for version := firstPrunable; version <= lastPrunable; version++ { - s.stateDB.DeleteVersion(uint64(version)) +func (rs *Store) handlePruning(current int64) error { + // Pass DB txn to pruning manager via adapter; running txns must be refreshed after this. + // This is hacky but needed in order to restrict to a single txn (for memdb compatibility) + // since the manager calls SetSync internally. + rs.stateTxn.Discard() + defer rs.refreshTransactions(true) + db := rs.stateDB.ReadWriter() + rs.pruningManager.HandleHeight(current-1, dbutil.ReadWriterAsTmdb(db)) // we should never prune the current version. 
+ db.Discard() + if !rs.pruningManager.ShouldPruneAtHeight(current) { + return nil + } + db = rs.stateDB.ReadWriter() + defer db.Discard() + pruningHeights, err := rs.pruningManager.GetFlushAndResetPruningHeights(dbutil.ReadWriterAsTmdb(db)) + if err != nil { + return err + } + return pruneVersions(pruningHeights, func(ver int64) error { + if err := rs.stateDB.DeleteVersion(uint64(ver)); err != nil { + return fmt.Errorf("error pruning StateDB: %w", err) + } - if s.StateCommitmentDB != nil { - s.StateCommitmentDB.DeleteVersion(uint64(version)) + if rs.StateCommitmentDB != nil { + if err := rs.StateCommitmentDB.DeleteVersion(uint64(ver)); err != nil { + return fmt.Errorf("error pruning StateCommitmentDB: %w", err) } } - } + return nil + }) +} - s.tran.Commit() - return *cid +// Performs necessary pruning via callback +func pruneVersions(heights []int64, prune func(int64) error) error { + for _, height := range heights { + if err := prune(height); err != nil { + return err + } + } + return nil } func (s *Store) getMerkleRoots() (ret map[string][]byte, err error) { ret = map[string][]byte{} for key := range s.schema { - sub, has := s.substoreCache[key] + sub, has := s.substoreCache[key.Name()] if !has { - sub, err = s.getSubstore(key) + sub, err = s.getSubstore(key.Name()) if err != nil { return } } - ret[key] = sub.stateCommitmentStore.Root() + ret[key.Name()] = sub.stateCommitmentStore.Root() } return } @@ -582,9 +623,8 @@ func (s *Store) commit(target uint64) (id *types.CommitID, err error) { } // Update substore Merkle roots for key, storeHash := range storeHashes { - pfx := substorePrefix(key) - stateW := prefixdb.NewReadWriter(s.stateTxn, pfx) - if err = stateW.Set(substoreMerkleRootKey, storeHash); err != nil { + w := prefixdb.NewReadWriter(s.stateTxn, prefixSubstore(key)) + if err = w.Set(substoreMerkleRootKey, storeHash); err != nil { return } } @@ -600,19 +640,15 @@ func (s *Store) commit(target uint64) (id *types.CommitID, err error) { err = util.CombineErrors(err, s.stateDB.Revert(), "stateDB.Revert also failed") } }() - err = s.stateDB.SaveVersion(target) - if err != nil { + if err = s.stateDB.SaveVersion(target); err != nil { return } - stateTxn := s.stateDB.ReadWriter() defer func() { if err != nil { - err = util.CombineErrors(err, stateTxn.Discard(), "stateTxn.Discard also failed") + err = util.CombineErrors(err, s.stateTxn.Discard(), "stateTxn.Discard also failed") } }() - stateCommitmentTxn := stateTxn - // If DBs are not separate, StateCommitment state has been committed & snapshotted if s.StateCommitmentDB != nil { // if any error is encountered henceforth, we must revert the state and SC dbs @@ -634,21 +670,41 @@ func (s *Store) commit(target uint64) (id *types.CommitID, err error) { } }() - err = s.StateCommitmentDB.SaveVersion(target) - if err != nil { + if err = s.StateCommitmentDB.SaveVersion(target); err != nil { return } - stateCommitmentTxn = s.StateCommitmentDB.ReadWriter() } - s.stateTxn = stateTxn - s.stateCommitmentTxn = stateCommitmentTxn + // flush is complete, refresh our DB read/writers + if err = s.refreshTransactions(false); err != nil { + return + } + + return &types.CommitID{Version: int64(target), Hash: rootHash}, nil +} + +// Resets the txn objects in the store (does not discard current txns), then propagates +// them to cached substores. +// justState indicates we only need to refresh the stateDB txn. 
+func (s *Store) refreshTransactions(justState bool) error {
+	s.stateTxn = s.stateDB.ReadWriter()
+	if s.StateCommitmentDB != nil {
+		if !justState {
+			s.stateCommitmentTxn = s.StateCommitmentDB.ReadWriter()
+		}
+	} else {
+		s.stateCommitmentTxn = s.stateTxn
+	}
+
+	storeHashes, err := s.getMerkleRoots()
+	if err != nil {
+		return err
+	}
 	// the state of all live substores must be refreshed
 	for key, sub := range s.substoreCache {
 		sub.refresh(storeHashes[key])
 	}
-
-	return &types.CommitID{Version: int64(target), Hash: rootHash}, nil
+	return nil
 }
 
 // LastCommitID implements Committer.
@@ -676,31 +732,45 @@ func (s *Store) SetInitialVersion(version uint64) error {
 }
 
 // GetVersion implements CommitMultiStore.
-func (s *Store) GetVersion(version int64) (types.BasicMultiStore, error) {
+func (s *Store) GetVersion(version int64) (types.MultiStore, error) {
 	return s.getView(version)
 }
 
-// CacheMultiStore implements BasicMultiStore.
-func (s *Store) CacheMultiStore() types.CacheMultiStore {
-	return &cacheStore{
-		source:           s,
-		substores:        map[string]types.CacheKVStore{},
-		traceListenMixin: newTraceListenMixin(),
+// CacheWrap implements MultiStore.
+func (s *Store) CacheWrap() types.CacheMultiStore {
+	return newCacheStore(s)
+}
+
+// GetAllVersions returns all available versions.
+// https://github.com/cosmos/cosmos-sdk/pull/11124
+func (s *Store) GetAllVersions() []uint64 {
+	vs, err := s.stateDB.Versions()
+	if err != nil {
+		panic(err)
+	}
+	var ret []uint64
+	for it := vs.Iterator(); it.Next(); {
+		ret = append(ret, it.Value())
 	}
+	return ret
 }
 
 // PruneSnapshotHeight prunes the given height according to the prune strategy.
 // If PruneNothing, this is a no-op.
 // If other strategy, this height is persisted until it is
 // less than <current height> - KeepRecent and <current height> % Interval == 0
-func (s *Store) PruneSnapshotHeight(height int64) {
-	panic("not implemented")
+func (rs *Store) PruneSnapshotHeight(height int64) {
+	rs.stateTxn.Discard()
+	defer rs.refreshTransactions(true)
+	db := rs.stateDB.ReadWriter()
+	defer db.Discard()
+	rs.pruningManager.HandleHeightSnapshot(height, dbutil.ReadWriterAsTmdb(db))
 }
 
 // SetSnapshotInterval sets the interval at which the snapshots are taken.
 // It is used by the store to determine which heights to retain until after the snapshot is complete.
-func (s *Store) SetSnapshotInterval(snapshotInterval uint64) { - panic("not implemented") +func (rs *Store) SetSnapshotInterval(snapshotInterval uint64) { + rs.pruningManager.SetSnapshotInterval(snapshotInterval) } // parsePath expects a format like /[/] @@ -764,9 +834,6 @@ func (s *Store) Query(req abci.RequestQuery) (res abci.ResponseQuery) { return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access height"), false) } - if _, has := s.schema[storeName]; !has { - return sdkerrors.QueryResult(sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "no such store: %s", storeName), false) - } substore, err := view.getSubstore(storeName) if err != nil { return sdkerrors.QueryResult(sdkerrors.Wrapf(err, "failed to access store: %s", storeName), false) @@ -838,51 +905,85 @@ func binarySearch(hay []string, ndl string) (int, bool) { return from, false } -func (pr *prefixRegistry) storeInfo(key string) (sst types.StoreType, ix int, err error) { - ix, has := binarySearch(pr.reserved, key) +// Migrates the state of the registry based on the upgrades +func (pr *SchemaBuilder) migrateSchema(upgrades types.StoreUpgrades) error { + for _, key := range upgrades.Deleted { + sst, ix, err := pr.storeInfo(key) + if err != nil { + return err + } + if sst != types.StoreTypePersistent { + return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", key, sst) + } + pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...) + delete(pr.StoreSchema, key) + } + for _, rename := range upgrades.Renamed { + sst, ix, err := pr.storeInfo(rename.OldKey) + if err != nil { + return err + } + if sst != types.StoreTypePersistent { + return fmt.Errorf("prefix is for non-persistent substore: %v (%v)", rename.OldKey, sst) + } + pr.reserved = append(pr.reserved[:ix], pr.reserved[ix+1:]...) + delete(pr.StoreSchema, rename.OldKey) + err = pr.registerName(rename.NewKey, types.StoreTypePersistent) + if err != nil { + return err + } + } + for _, key := range upgrades.Added { + err := pr.registerName(key, types.StoreTypePersistent) + if err != nil { + return err + } + } + return nil +} + +func (reg *SchemaBuilder) storeInfo(key string) (sst types.StoreType, ix int, err error) { + ix, has := binarySearch(reg.reserved, key) if !has { - err = fmt.Errorf("prefix does not exist: %v", key) + err = fmt.Errorf("name does not exist: %v", key) return } - sst, has = pr.StoreSchema[key] + sst, has = reg.StoreSchema[key] if !has { - err = fmt.Errorf("prefix is registered but not in schema: %v", key) + err = fmt.Errorf("name is registered but not in schema: %v", key) } return } -func (pr *prefixRegistry) RegisterSubstore(key string, typ types.StoreType) error { - if !validSubStoreType(typ) { - return fmt.Errorf("StoreType not supported: %v", typ) - } - +// registerName registers a store key by name only +func (reg *SchemaBuilder) registerName(key string, typ types.StoreType) error { // Find the neighboring reserved prefix, and check for duplicates and conflicts - i, has := binarySearch(pr.reserved, key) + i, has := binarySearch(reg.reserved, key) if has { - return fmt.Errorf("prefix already exists: %v", key) - } - if i > 0 && strings.HasPrefix(key, pr.reserved[i-1]) { - return fmt.Errorf("prefix conflict: '%v' exists, cannot add '%v'", pr.reserved[i-1], key) - } - if i < len(pr.reserved) && strings.HasPrefix(pr.reserved[i], key) { - return fmt.Errorf("prefix conflict: '%v' exists, cannot add '%v'", pr.reserved[i], key) - } - reserved := pr.reserved[:i] + return fmt.Errorf("name already exists: %v", key) + } + // TODO auth vs authz ? 
+ // if i > 0 && strings.HasPrefix(key, reg.reserved[i-1]) { + // return fmt.Errorf("name conflict: '%v' exists, cannot add '%v'", reg.reserved[i-1], key) + // } + // if i < len(reg.reserved) && strings.HasPrefix(reg.reserved[i], key) { + // return fmt.Errorf("name conflict: '%v' exists, cannot add '%v'", reg.reserved[i], key) + // } + reserved := reg.reserved[:i] reserved = append(reserved, key) - pr.reserved = append(reserved, pr.reserved[i:]...) - pr.StoreSchema[key] = typ + reg.reserved = append(reserved, reg.reserved[i:]...) + reg.StoreSchema[key] = typ return nil } func (tlm *traceListenMixin) AddListeners(skey types.StoreKey, listeners []types.WriteListener) { - key := skey.Name() - tlm.listeners[key] = append(tlm.listeners[key], listeners...) + tlm.listeners[skey] = append(tlm.listeners[skey], listeners...) } // ListeningEnabled returns if listening is enabled for a specific KVStore func (tlm *traceListenMixin) ListeningEnabled(key types.StoreKey) bool { - if ls, has := tlm.listeners[key.Name()]; has { + if ls, has := tlm.listeners[key]; has { return len(ls) != 0 } return false @@ -895,20 +996,47 @@ func (tlm *traceListenMixin) TracingEnabled() bool { func (tlm *traceListenMixin) SetTracer(w io.Writer) { tlm.TraceWriter = w } +func (tlm *traceListenMixin) SetTracingContext(tc types.TraceContext) { + tlm.traceContextMutex.Lock() + defer tlm.traceContextMutex.Unlock() + if tlm.TraceContext != nil { + for k, v := range tc { + tlm.TraceContext[k] = v + } + } else { + tlm.TraceContext = tc + } +} -func (tlm *traceListenMixin) SetTraceContext(tc types.TraceContext) { - tlm.TraceContext = tc +func (tlm *traceListenMixin) getTracingContext() types.TraceContext { + tlm.traceContextMutex.Lock() + defer tlm.traceContextMutex.Unlock() + + if tlm.TraceContext == nil { + return nil + } + + ctx := types.TraceContext{} + for k, v := range tlm.TraceContext { + ctx[k] = v + } + return ctx } func (tlm *traceListenMixin) wrapTraceListen(store types.KVStore, skey types.StoreKey) types.KVStore { if tlm.TracingEnabled() { - store = tracekv.NewStore(store, tlm.TraceWriter, tlm.TraceContext) + store = tracekv.NewStore(store, tlm.TraceWriter, tlm.getTracingContext()) } if tlm.ListeningEnabled(skey) { - store = listenkv.NewStore(store, skey, tlm.listeners[skey.Name()]) + store = listenkv.NewStore(store, skey, tlm.listeners[skey]) } return store } -func (s *Store) GetPruning() pruningtypes.PruningOptions { return s.Pruning } -func (s *Store) SetPruning(po pruningtypes.PruningOptions) { s.Pruning = po } +func (s *Store) GetPruning() pruningtypes.PruningOptions { + return s.pruningManager.GetOptions() +} + +func (s *Store) SetPruning(po pruningtypes.PruningOptions) { + s.pruningManager.SetOptions(po) +} diff --git a/store/v2alpha1/multi/store_test.go b/store/v2alpha1/multi/store_test.go index 9837a891ced9..ff97c0dadec5 100644 --- a/store/v2alpha1/multi/store_test.go +++ b/store/v2alpha1/multi/store_test.go @@ -4,6 +4,7 @@ import ( "bytes" "math" "testing" + "time" "github.com/stretchr/testify/require" @@ -30,25 +31,35 @@ var ( skey_1b = types.NewKVStoreKey("store1b") skey_2b = types.NewKVStoreKey("store2b") skey_3b = types.NewKVStoreKey("store3b") + + skey_mem1 = types.NewMemoryStoreKey("mstore1") + skey_tran1 = types.NewTransientStoreKey("tstore1") ) -func simpleStoreConfig(t *testing.T) StoreConfig { - opts := DefaultStoreConfig() - require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)) +// Factored out so the same tests can be run on Store and adaptor (v1asv2) +type 
storeConstructor = func(dbm.Connection, StoreParams) (types.CommitMultiStore, error) + +func multistoreConstructor(db dbm.Connection, params StoreParams) (types.CommitMultiStore, error) { + return NewStore(db, params) +} + +func storeParams1(t *testing.T) StoreParams { + opts := DefaultStoreParams() + require.NoError(t, opts.RegisterSubstore(skey_1, types.StoreTypePersistent)) return opts } -func storeConfig123(t *testing.T) StoreConfig { - opts := DefaultStoreConfig() +func storeParams123(t *testing.T) StoreParams { + opts := DefaultStoreParams() opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) - require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)) - require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypePersistent)) - require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_1, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_2, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_3, types.StoreTypePersistent)) return opts } func newSubStoreWithData(t *testing.T, db dbm.Connection, storeData map[string]string) (*Store, types.KVStore) { - root, err := NewStore(db, simpleStoreConfig(t)) + root, err := NewStore(db, storeParams1(t)) require.NoError(t, err) store := root.GetKVStore(skey_1) @@ -58,6 +69,81 @@ func newSubStoreWithData(t *testing.T, db dbm.Connection, storeData map[string]s return root, store } +func TestStoreParams(t *testing.T) { + opts := DefaultStoreParams() + // Fail with invalid type enum + require.Error(t, opts.RegisterSubstore(skey_1, types.StoreTypeDB)) + require.Error(t, opts.RegisterSubstore(skey_1, types.StoreTypeSMT)) + // Mem & tranient stores need corresponding concrete type + require.Error(t, opts.RegisterSubstore(skey_1, types.StoreTypeMemory)) + require.Error(t, opts.RegisterSubstore(skey_1, types.StoreTypeTransient)) + require.NoError(t, opts.RegisterSubstore(skey_mem1, types.StoreTypeMemory)) + require.NoError(t, opts.RegisterSubstore(skey_tran1, types.StoreTypeTransient)) + // Unambiguous prefixes are valid + require.NoError(t, opts.RegisterSubstore(skey_1, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_2, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_3b, types.StoreTypePersistent)) + // Prefixes with conflicts are also allowed + require.NoError(t, opts.RegisterSubstore(skey_1b, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_2b, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_3, types.StoreTypePersistent)) +} + +func TestMultiStoreBasic(t *testing.T) { + doTestMultiStoreBasic(t, multistoreConstructor) +} + +func doTestMultiStoreBasic(t *testing.T, ctor storeConstructor) { + opts := DefaultStoreParams() + require.NoError(t, opts.RegisterSubstore(skey_1, types.StoreTypePersistent)) + store, err := ctor(memdb.NewDB(), opts) + require.NoError(t, err) + + require.True(t, store.HasKVStore(skey_1)) + require.False(t, store.HasKVStore(skey_2)) + require.Panics(t, func() { store.GetKVStore(skey_2) }) + + store_1 := store.GetKVStore(skey_1) + require.NotNil(t, store_1) + store_1.Set([]byte{0}, []byte{0}) + val := store_1.Get([]byte{0}) + require.Equal(t, []byte{0}, val) + store_1.Delete([]byte{0}) + val = store_1.Get([]byte{0}) + require.Equal(t, []byte(nil), val) +} + +func TestSubstoreBasic(t *testing.T) { + badkey := skey_1.Name() + string(dataPrefix) + 
skey_bad := types.NewKVStoreKey(badkey) + + opts := DefaultStoreParams() + require.NoError(t, opts.RegisterSubstore(skey_1, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_bad, types.StoreTypePersistent)) + store, err := NewStore(memdb.NewDB(), opts) + require.NoError(t, err) + + // Test that substores do not leak into those with conflicting prefixes + // i.e., some unambiguous encoding of store keys is used + store_bad := store.GetKVStore(skey_bad) + require.NotNil(t, store_bad) + store_bad.Set([]byte("1bad"), []byte{0x1b}) + require.Equal(t, []byte{0x1b}, store_bad.Get([]byte("1bad"))) + + store_1 := store.GetKVStore(skey_1) + require.NotNil(t, store_1) + store_1.Set([]byte{0}, []byte{0}) + + count := 0 + it := store_1.Iterator(nil, nil) + for ; it.Valid(); it.Next() { + require.Equal(t, []byte{0}, it.Key()) + count++ + } + require.NoError(t, it.Close()) + require.Equal(t, 1, count) +} + func TestGetSetHasDelete(t *testing.T) { _, store := newSubStoreWithData(t, memdb.NewDB(), alohaData) key := "hello" @@ -84,74 +170,6 @@ func TestGetSetHasDelete(t *testing.T) { require.Panics(t, func() { store.Set(nil, []byte("value")) }, "Set(nil key) should panic") require.Panics(t, func() { store.Set([]byte{}, []byte("value")) }, "Set(empty key) should panic") require.Panics(t, func() { store.Set([]byte("key"), nil) }, "Set(nil value) should panic") - sub := store.(*substore) - sub.indexBucket = rwCrudFails{sub.indexBucket, nil} - require.Panics(t, func() { - store.Set([]byte("key"), []byte("value")) - }, "Set() when index fails should panic") -} - -func TestConstructors(t *testing.T) { - db := memdb.NewDB() - - store, err := NewStore(db, simpleStoreConfig(t)) - require.NoError(t, err) - _ = store.GetKVStore(skey_1) - store.Commit() - require.NoError(t, store.Close()) - - t.Run("fail to load if InitialVersion > lowest existing version", func(t *testing.T) { - opts := StoreConfig{InitialVersion: 5, Pruning: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)} - store, err = NewStore(db, opts) - require.Error(t, err) - db.Close() - }) - - t.Run("can't load store when db.Versions fails", func(t *testing.T) { - store, err = NewStore(dbVersionsFails{memdb.NewDB()}, DefaultStoreConfig()) - require.Error(t, err) - store, err = NewStore(db, StoreConfig{StateCommitmentDB: dbVersionsFails{memdb.NewDB()}}) - require.Error(t, err) - }) - - db = memdb.NewDB() - merkledb := memdb.NewDB() - w := db.Writer() - t.Run("can't use a DB with open writers", func(t *testing.T) { - store, err = NewStore(db, DefaultStoreConfig()) - require.Error(t, err) - w.Discard() - w = merkledb.Writer() - store, err = NewStore(db, StoreConfig{StateCommitmentDB: merkledb}) - require.Error(t, err) - w.Discard() - }) - - t.Run("can't use DBs with different version history", func(t *testing.T) { - merkledb.SaveNextVersion() - store, err = NewStore(db, StoreConfig{StateCommitmentDB: merkledb}) - require.Error(t, err) - }) - merkledb.Close() - - t.Run("can't load existing store if we can't access root hash", func(t *testing.T) { - store, err = NewStore(db, simpleStoreConfig(t)) - require.NoError(t, err) - store.Commit() - require.NoError(t, store.Close()) - // ...whether because root is misssing - w = db.Writer() - s1RootKey := append(contentPrefix, substorePrefix(skey_1.Name())...) - s1RootKey = append(s1RootKey, merkleRootKey...) 
- w.Delete(s1RootKey) - w.Commit() - db.SaveNextVersion() - store, err = NewStore(db, DefaultStoreConfig()) - require.Error(t, err) - // ...or, because of an error - store, err = NewStore(dbRWCrudFails{db}, DefaultStoreConfig()) - require.Error(t, err) - }) } func TestIterators(t *testing.T) { @@ -215,8 +233,71 @@ func TestIterators(t *testing.T) { require.Panics(t, func() { store.ReverseIterator(nil, []byte{}) }, "Iterator(empty key) should panic") } +func TestConstructors(t *testing.T) { + db := memdb.NewDB() + + store, err := NewStore(db, storeParams1(t)) + require.NoError(t, err) + _ = store.GetKVStore(skey_1) + store.Commit() + require.NoError(t, store.Close()) + + t.Run("fail to load if InitialVersion > lowest existing version", func(t *testing.T) { + opts := StoreParams{InitialVersion: 5, Pruning: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)} + store, err = NewStore(db, opts) + require.Error(t, err) + db.Close() + }) + + t.Run("can't load store when db.Versions fails", func(t *testing.T) { + store, err = NewStore(dbVersionsFails{memdb.NewDB()}, DefaultStoreParams()) + require.Error(t, err) + store, err = NewStore(db, StoreParams{StateCommitmentDB: dbVersionsFails{memdb.NewDB()}}) + require.Error(t, err) + }) + + db = memdb.NewDB() + merkledb := memdb.NewDB() + w := db.Writer() + t.Run("can't use a DB with open writers", func(t *testing.T) { + store, err = NewStore(db, DefaultStoreParams()) + require.Error(t, err) + w.Discard() + w = merkledb.Writer() + store, err = NewStore(db, StoreParams{StateCommitmentDB: merkledb}) + require.Error(t, err) + w.Discard() + }) + + t.Run("can't use DBs with different version history", func(t *testing.T) { + merkledb.SaveNextVersion() + store, err = NewStore(db, StoreParams{StateCommitmentDB: merkledb}) + require.Error(t, err) + }) + merkledb.Close() + + t.Run("can't load existing store if we can't access root hash", func(t *testing.T) { + store, err = NewStore(db, storeParams1(t)) + require.NoError(t, err) + store.Commit() + require.NoError(t, store.Close()) + // ...whether because root is misssing + w = db.Writer() + s1RootKey := append(contentPrefix, prefixSubstore(skey_1.Name())...) + s1RootKey = append(s1RootKey, merkleRootKey...) 
+ w.Delete(s1RootKey) + w.Commit() + db.SaveNextVersion() + store, err = NewStore(db, DefaultStoreParams()) + require.Error(t, err) + // ...or, because of an error + store, err = NewStore(dbRWCrudFails{db}, DefaultStoreParams()) + require.Error(t, err) + }) +} + func TestCommit(t *testing.T) { - testBasic := func(opts StoreConfig) { + testBasic := func(opts StoreParams) { db := memdb.NewDB() store, err := NewStore(db, opts) require.NoError(t, err) @@ -226,19 +307,25 @@ func TestCommit(t *testing.T) { // Adding one record changes the hash s1 := store.GetKVStore(skey_1) s1.Set([]byte{0}, []byte{0}) - idOne := store.Commit() - require.Equal(t, idNew.Version+1, idOne.Version) - require.NotEqual(t, idNew.Hash, idOne.Hash) + id := store.Commit() + require.Equal(t, idNew.Version+1, id.Version) + require.NotEqual(t, idNew.Hash, id.Hash) // Hash of emptied store is same as new store s1.Delete([]byte{0}) - idEmptied := store.Commit() - require.Equal(t, idNew.Hash, idEmptied.Hash) + id = store.Commit() + require.Equal(t, idNew.Hash, id.Hash) + + // We can set and delete the same key within a transaction + s1.Set([]byte("might"), []byte("delete")) + s1.Delete([]byte("might")) + id = store.Commit() + require.Equal(t, idNew.Hash, id.Hash) - previd := idOne + previd := id for i := byte(1); i < 5; i++ { s1.Set([]byte{i}, []byte{i}) - id := store.Commit() + id = store.Commit() lastid := store.LastCommitID() require.Equal(t, id.Hash, lastid.Hash) require.Equal(t, id.Version, lastid.Version) @@ -246,7 +333,7 @@ func TestCommit(t *testing.T) { require.NotEqual(t, previd.Version, id.Version) } } - basicOpts := simpleStoreConfig(t) + basicOpts := storeParams1(t) basicOpts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) t.Run("sanity tests for Merkle hashing", func(t *testing.T) { testBasic(basicOpts) @@ -260,8 +347,7 @@ func TestCommit(t *testing.T) { testFailedCommit := func(t *testing.T, store *Store, db dbm.Connection, - opts StoreConfig, - ) { + opts StoreParams) { if db == nil { db = store.stateDB } @@ -286,7 +372,7 @@ func TestCommit(t *testing.T) { require.NoError(t, store.Close()) } - opts := simpleStoreConfig(t) + opts := storeParams1(t) opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) // Ensure Store's commit is rolled back in each failure case... 
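
// Illustrative sketch (assumptions noted inline): the length-prefixed substore
// prefixes added in store.go above (varintLen/prefixSubstore) are what keep a
// substore named "store1" disjoint from one named "store1" plus the data
// prefix byte — exactly the property TestSubstoreBasic exercises. The helpers
// are re-declared here so the snippet is self-contained; contentPrefix's value
// is an assumption chosen only for illustration.
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

var contentPrefix = []byte{2} // assumed value of the root store's content prefix

// varintLen mirrors the helper in store.go: the key length encoded as a uvarint.
func varintLen(l int) []byte {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, uint64(l))
	return buf[:n]
}

// prefixSubstore mirrors the helper in store.go: contentPrefix | len(key) | key.
func prefixSubstore(key string) []byte {
	lv := varintLen(len(key))
	ret := append(lv, key...)
	return append(contentPrefix, ret...)
}

func main() {
	short := prefixSubstore("store1")
	long := prefixSubstore("store1\x01") // a name that embeds "store1" plus one byte

	// With the old bare concatenation (contentPrefix | key) the longer name's
	// bucket would sit inside the shorter name's keyspace; with the length
	// prefix the two buckets can never overlap.
	fmt.Println(bytes.HasPrefix(long, short)) // prints: false
}
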
@@ -324,7 +410,7 @@ func TestCommit(t *testing.T) { testFailedCommit(t, store, nil, opts) }) - opts = simpleStoreConfig(t) + opts = storeParams1(t) t.Run("recover after stateDB.Versions error triggers failure", func(t *testing.T) { db := memdb.NewDB() store, err := NewStore(db, opts) @@ -359,7 +445,7 @@ func TestCommit(t *testing.T) { }) t.Run("first commit version matches InitialVersion", func(t *testing.T) { - opts = simpleStoreConfig(t) + opts = storeParams1(t) opts.InitialVersion = 5 opts.Pruning = pruningtypes.NewPruningOptions(pruningtypes.PruningNothing) opts.StateCommitmentDB = memdb.NewDB() @@ -369,14 +455,14 @@ func TestCommit(t *testing.T) { }) // test improbable failures to fill out test coverage - opts = simpleStoreConfig(t) + opts = storeParams1(t) store, err := NewStore(memdb.NewDB(), opts) require.NoError(t, err) store.Commit() store.stateDB = dbVersionsFails{store.stateDB} require.Panics(t, func() { store.LastCommitID() }) - opts = simpleStoreConfig(t) + opts = storeParams1(t) opts.StateCommitmentDB = memdb.NewDB() store, err = NewStore(memdb.NewDB(), opts) require.NoError(t, err) @@ -394,23 +480,30 @@ func sliceToSet(slice []uint64) map[uint64]struct{} { } func TestPruning(t *testing.T) { + doTestPruning(t, multistoreConstructor, true) +} + +func doTestPruning(t *testing.T, ctor storeConstructor, sepDBs bool) { // Save versions up to 10 and verify pruning at final commit testCases := []struct { pruningtypes.PruningOptions kept []uint64 }{ + {pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, + {pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []uint64{8, 9, 10}}, {pruningtypes.NewCustomPruningOptions(2, 10), []uint64{8, 9, 10}}, {pruningtypes.NewCustomPruningOptions(0, 10), []uint64{10}}, - {pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), []uint64{8, 9, 10}}, - {pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}}, } for tci, tc := range testCases { - dbs := []dbm.Connection{memdb.NewDB(), memdb.NewDB()} - opts := simpleStoreConfig(t) + opts := storeParams1(t) opts.Pruning = tc.PruningOptions - opts.StateCommitmentDB = dbs[1] - store, err := NewStore(dbs[0], opts) + dbs := []dbm.Connection{memdb.NewDB()} + if sepDBs { + dbs = append(dbs, memdb.NewDB()) + opts.StateCommitmentDB = dbs[1] + } + store, err := ctor(dbs[0], opts) require.NoError(t, err) s1 := store.GetKVStore(skey_1) @@ -428,7 +521,7 @@ func TestPruning(t *testing.T) { kept := sliceToSet(tc.kept) for v := uint64(1); v <= 10; v++ { _, has := kept[v] - require.Equal(t, has, versions.Exists(v), "Version = %v; tc #%d", v, tci) + require.Equal(t, has, versions.Exists(v), "version = %v; tc #%d", v, tci) } } } @@ -443,9 +536,9 @@ func TestPruning(t *testing.T) { } db := memdb.NewDB() - opts := simpleStoreConfig(t) + opts := storeParams1(t) opts.Pruning = pruningtypes.NewCustomPruningOptions(0, 10) - store, err := NewStore(db, opts) + store, err := ctor(db, opts) require.NoError(t, err) for i := byte(1); i <= 20; i++ { @@ -471,9 +564,13 @@ func TestPruning(t *testing.T) { } } +func TestQuery(t *testing.T) { + doTestQuery(t, multistoreConstructor) +} + func queryPath(skey types.StoreKey, endp string) string { return "/" + skey.Name() + endp } -func TestQuery(t *testing.T) { +func doTestQuery(t *testing.T, ctor storeConstructor) { k1, v1 := []byte("k1"), []byte("v1") k2, v2 := []byte("k2"), []byte("v2") v3 := []byte("v3") @@ -502,16 +599,15 @@ func TestQuery(t *testing.T) { valExpSub2, err 
:= KVs2.Marshal() require.NoError(t, err) - store, err := NewStore(memdb.NewDB(), simpleStoreConfig(t)) + store, err := NewStore(memdb.NewDB(), storeParams1(t)) require.NoError(t, err) cid := store.Commit() ver := cid.Version - query := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: ver} - querySub := abci.RequestQuery{Path: queryPath(skey_1, "/subspace"), Data: ksub, Height: ver} + querySubspace := abci.RequestQuery{Path: queryPath(skey_1, "/subspace"), Data: ksub, Height: ver} queryHeight0 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1} // query subspace before anything set - qres := store.Query(querySub) + qres := store.Query(querySubspace) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, valExpSubEmpty, qres.Value) @@ -522,25 +618,27 @@ func TestQuery(t *testing.T) { sub.Set(k2, v2) t.Run("basic queries", func(t *testing.T) { + query1 := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: ver} + // set data without commit, doesn't show up - qres = store.Query(query) + qres = store.Query(query1) require.True(t, qres.IsOK(), qres.Log) require.Nil(t, qres.Value) // commit it, but still don't see on old version cid = store.Commit() - qres = store.Query(query) + qres = store.Query(query1) require.True(t, qres.IsOK(), qres.Log) require.Nil(t, qres.Value) // but yes on the new version - query.Height = cid.Version - qres = store.Query(query) + query1.Height = cid.Version + qres = store.Query(query1) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, v1, qres.Value) // and for the subspace - querySub.Height = cid.Version - qres = store.Query(querySub) + querySubspace.Height = cid.Version + qres = store.Query(querySubspace) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, valExpSub1, qres.Value) @@ -549,13 +647,13 @@ func TestQuery(t *testing.T) { cid = store.Commit() // query will return old values, as height is fixed - qres = store.Query(query) + qres = store.Query(query1) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, v1, qres.Value) // update to latest height in the query and we are happy - query.Height = cid.Version - qres = store.Query(query) + query1.Height = cid.Version + qres = store.Query(query1) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, v3, qres.Value) // try other key @@ -564,43 +662,71 @@ func TestQuery(t *testing.T) { require.True(t, qres.IsOK(), qres.Log) require.Equal(t, v2, qres.Value) // and for the subspace - querySub.Height = cid.Version - qres = store.Query(querySub) + querySubspace.Height = cid.Version + qres = store.Query(querySubspace) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, valExpSub2, qres.Value) + }) + + t.Run("different versions", func(t *testing.T) { + stateDB := memdb.NewDB() // default (height 0) will show latest-1 qres = store.Query(queryHeight0) require.True(t, qres.IsOK(), qres.Log) require.Equal(t, v1, qres.Value) - }) - // querying an empty store will fail - store2, err := NewStore(memdb.NewDB(), simpleStoreConfig(t)) - require.NoError(t, err) - qres = store2.Query(queryHeight0) - require.True(t, qres.IsErr()) + // querying an empty store will fail + store, err = NewStore(stateDB, storeParams1(t)) + require.NoError(t, err) + qres = store.Query(queryHeight0) + require.True(t, qres.IsErr()) - // default shows latest, if latest-1 does not exist - store2.GetKVStore(skey_1).Set(k1, v1) - store2.Commit() - qres = store2.Query(queryHeight0) - require.True(t, qres.IsOK(), qres.Log) - require.Equal(t, v1, qres.Value) - store2.Close() + // default (height=0) 
shows latest, if latest-1 does not exist + store.GetKVStore(skey_1).Set(k1, v1) + cid = store.Commit() + qres = store.Query(queryHeight0) + require.True(t, qres.IsOK(), qres.Log) + require.Equal(t, v1, qres.Value) + require.NoError(t, store.Close()) + + // querying past version succeeds after rename + opts := DefaultStoreParams() + require.NoError(t, opts.RegisterSubstore(skey_2, types.StoreTypePersistent)) + opts.Upgrades = &types.StoreUpgrades{ + Renamed: []types.StoreRename{types.StoreRename{skey_1.Name(), skey_2.Name()}}, + } + store, err = NewStore(stateDB, opts) + require.NoError(t, err) + store.Commit() + query := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: cid.Version} + qres = store.Query(query) + require.True(t, qres.IsOK(), qres.Log) + require.NoError(t, store.Close()) + }) t.Run("failed queries", func(t *testing.T) { + stateDB := memdb.NewDB() + + store, err = NewStore(stateDB, storeParams1(t)) + require.NoError(t, err) + store.GetKVStore(skey_1).Set(k1, v1) + store.Commit() + // artificial error cases for coverage (should never happen with prescribed usage) // ensure that height overflow triggers an error require.NoError(t, err) - store2.stateDB = dbVersionsIs{store2.stateDB, dbm.NewVersionManager([]uint64{uint64(math.MaxInt64) + 1})} - qres = store2.Query(queryHeight0) + store.stateDB = dbVersionsIs{stateDB, dbm.NewVersionManager([]uint64{uint64(math.MaxInt64) + 1})} + qres = store.Query(queryHeight0) require.True(t, qres.IsErr()) // failure to access versions triggers an error - store2.stateDB = dbVersionsFails{store.stateDB} - qres = store2.Query(queryHeight0) + store.stateDB = dbVersionsFails{stateDB} + qres = store.Query(queryHeight0) require.True(t, qres.IsErr()) - store2.Close() + require.NoError(t, store.Close()) + + store, err = NewStore(stateDB, storeParams1(t)) + require.NoError(t, err) // query with a nil or empty key fails badquery := abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: []byte{}} @@ -610,7 +736,11 @@ func TestQuery(t *testing.T) { qres = store.Query(badquery) require.True(t, qres.IsErr()) // querying an invalid height will fail - badquery = abci.RequestQuery{Path: queryPath(skey_1, "/key"), Data: k1, Height: store.LastCommitID().Version + 1} + badquery = abci.RequestQuery{ + Path: queryPath(skey_1, "/key"), + Data: k1, + Height: store.LastCommitID().Version + 1, + } qres = store.Query(badquery) require.True(t, qres.IsErr()) // or an invalid path @@ -629,55 +759,27 @@ func TestQuery(t *testing.T) { require.NotNil(t, qres.ProofOps) } testProve() - store.Close() + require.NoError(t, store.Close()) - opts := simpleStoreConfig(t) + opts := storeParams1(t) opts.StateCommitmentDB = memdb.NewDB() store, err = NewStore(memdb.NewDB(), opts) require.NoError(t, err) store.GetKVStore(skey_1).Set(k1, v1) store.Commit() testProve() - store.Close() + require.NoError(t, store.Close()) }) } -func TestStoreConfig(t *testing.T) { - opts := DefaultStoreConfig() - // Fail with invalid types - require.Error(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypeDB)) - require.Error(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypeSMT)) - // Ensure that no prefix conflicts are allowed - require.NoError(t, opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent)) - require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory)) - require.NoError(t, opts.RegisterSubstore(skey_3b.Name(), types.StoreTypeTransient)) - require.Error(t, opts.RegisterSubstore(skey_1b.Name(), types.StoreTypePersistent)) - require.Error(t, 
opts.RegisterSubstore(skey_2b.Name(), types.StoreTypePersistent)) - require.Error(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypePersistent)) -} - -func TestMultiStoreBasic(t *testing.T) { - opts := DefaultStoreConfig() - err := opts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent) - require.NoError(t, err) - db := memdb.NewDB() - store, err := NewStore(db, opts) - require.NoError(t, err) - - store_1 := store.GetKVStore(skey_1) - require.NotNil(t, store_1) - store_1.Set([]byte{0}, []byte{0}) - val := store_1.Get([]byte{0}) - require.Equal(t, []byte{0}, val) - store_1.Delete([]byte{0}) - val = store_1.Get([]byte{0}) - require.Equal(t, []byte(nil), val) +func TestGetVersion(t *testing.T) { + doTestGetVersion(t, multistoreConstructor) } -func TestGetVersion(t *testing.T) { +func doTestGetVersion(t *testing.T, ctor storeConstructor) { db := memdb.NewDB() - opts := storeConfig123(t) - store, err := NewStore(db, opts) + opts := storeParams123(t) + store, err := ctor(db, opts) require.NoError(t, err) cid := store.Commit() @@ -709,10 +811,14 @@ func TestGetVersion(t *testing.T) { require.Equal(t, []byte{0}, subview.Get([]byte{0})) } -func TestMultiStoreMigration(t *testing.T) { +func TestStoreSchemaMigration(t *testing.T) { + doTestStoreSchemaMigration(t, multistoreConstructor) +} + +func doTestStoreSchemaMigration(t *testing.T, ctor storeConstructor) { db := memdb.NewDB() - opts := storeConfig123(t) - store, err := NewStore(db, opts) + opts := storeParams123(t) + store, err := ctor(db, opts) require.NoError(t, err) // write some data in all stores @@ -739,7 +845,7 @@ func TestMultiStoreMigration(t *testing.T) { var migratedID types.CommitID // Load without changes and make sure it is sensible - store, err = NewStore(db, opts) + store, err = ctor(db, opts) require.NoError(t, err) // let's query data to see it was saved properly @@ -750,20 +856,30 @@ func TestMultiStoreMigration(t *testing.T) { t.Run("basic migration", func(t *testing.T) { // now, let's load with upgrades... 
- opts.Upgrades = []types.StoreUpgrades{ - { - Added: []string{skey_4.Name()}, - Renamed: []types.StoreRename{{ - OldKey: skey_2.Name(), - NewKey: skey_2b.Name(), - }}, - Deleted: []string{skey_3.Name()}, - }, + upgrades := &types.StoreUpgrades{ + Added: []string{skey_4.Name()}, + Renamed: []types.StoreRename{{ + OldKey: skey_2.Name(), + NewKey: skey_2b.Name(), + }}, + Deleted: []string{skey_3.Name()}, } - store, err = NewStore(db, opts) - require.Nil(t, err) - // s1 was not changed + // store must be loaded with post-migration schema, so this fails + opts := storeParams123(t) + opts.Upgrades = upgrades + store, err = ctor(db, opts) + require.Error(t, err) + + opts = DefaultStoreParams() + opts.Upgrades = upgrades + require.NoError(t, opts.RegisterSubstore(skey_1, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_2b, types.StoreTypePersistent)) + require.NoError(t, opts.RegisterSubstore(skey_4, types.StoreTypePersistent)) + store, err = ctor(db, opts) + require.NoError(t, err) + + // store1 was not changed s1 = store.GetKVStore(skey_1) require.NotNil(t, s1) require.Equal(t, v1, s1.Get(k1)) @@ -799,19 +915,16 @@ func TestMultiStoreMigration(t *testing.T) { t.Run("reload after migrations", func(t *testing.T) { // fail to load the migrated store with the old schema - store, err = NewStore(db, storeConfig123(t)) + store, err = ctor(db, storeParams123(t)) require.Error(t, err) // pass in a schema reflecting the migrations - migratedOpts := DefaultStoreConfig() - err = migratedOpts.RegisterSubstore(skey_1.Name(), types.StoreTypePersistent) - require.NoError(t, err) - err = migratedOpts.RegisterSubstore(skey_2b.Name(), types.StoreTypePersistent) + migratedOpts := DefaultStoreParams() + require.NoError(t, migratedOpts.RegisterSubstore(skey_1, types.StoreTypePersistent)) + require.NoError(t, migratedOpts.RegisterSubstore(skey_2b, types.StoreTypePersistent)) + require.NoError(t, migratedOpts.RegisterSubstore(skey_4, types.StoreTypePersistent)) + store, err = ctor(db, migratedOpts) require.NoError(t, err) - err = migratedOpts.RegisterSubstore(skey_4.Name(), types.StoreTypePersistent) - require.NoError(t, err) - store, err = NewStore(db, migratedOpts) - require.Nil(t, err) require.Equal(t, migratedID, store.LastCommitID()) // query this new store @@ -852,31 +965,35 @@ func TestMultiStoreMigration(t *testing.T) { } func TestTrace(t *testing.T) { + doTestTrace(t, multistoreConstructor) +} + +func doTestTrace(t *testing.T, ctor storeConstructor) { key, value := []byte("test-key"), []byte("test-value") - tctx := types.TraceContext(map[string]interface{}{"blockHeight": 64}) + tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) - expected_Set := "{\"operation\":\"write\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n" - expected_Get := "{\"operation\":\"read\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n" - expected_Get_missing := "{\"operation\":\"read\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n" - expected_Delete := "{\"operation\":\"delete\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n" - expected_IterKey := "{\"operation\":\"iterKey\",\"key\":\"dGVzdC1rZXk=\",\"value\":\"\",\"metadata\":{\"blockHeight\":64}}\n" - expected_IterValue := "{\"operation\":\"iterValue\",\"key\":\"\",\"value\":\"dGVzdC12YWx1ZQ==\",\"metadata\":{\"blockHeight\":64}}\n" + expected_Set := 
`{"operation":"write","key":"dGVzdC1rZXk=","value":"dGVzdC12YWx1ZQ==","metadata":{"blockHeight":64}}` + "\n" + expected_Get := `{"operation":"read","key":"dGVzdC1rZXk=","value":"dGVzdC12YWx1ZQ==","metadata":{"blockHeight":64}}` + "\n" + expected_Get_missing := `{"operation":"read","key":"dGVzdC1rZXk=","value":"","metadata":{"blockHeight":64}}` + "\n" + expected_Delete := `{"operation":"delete","key":"dGVzdC1rZXk=","value":"","metadata":{"blockHeight":64}}` + "\n" + expected_IterKey := `{"operation":"iterKey","key":"dGVzdC1rZXk=","value":"","metadata":{"blockHeight":64}}` + "\n" + expected_IterValue := `{"operation":"iterValue","key":"","value":"dGVzdC12YWx1ZQ==","metadata":{"blockHeight":64}}` + "\n" db := memdb.NewDB() - opts := simpleStoreConfig(t) - require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory)) - require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypeTransient)) + opts := storeParams1(t) + require.NoError(t, opts.RegisterSubstore(skey_mem1, types.StoreTypeMemory)) + require.NoError(t, opts.RegisterSubstore(skey_tran1, types.StoreTypeTransient)) - store, err := NewStore(db, opts) + store, err := ctor(db, opts) require.NoError(t, err) - store.SetTraceContext(tctx) + store.SetTracingContext(tc) require.False(t, store.TracingEnabled()) var buf bytes.Buffer store.SetTracer(&buf) require.True(t, store.TracingEnabled()) - for _, skey := range []types.StoreKey{skey_1, skey_2, skey_3} { + for _, skey := range []types.StoreKey{skey_1, skey_mem1, skey_tran1} { buf.Reset() store.GetKVStore(skey).Get(key) require.Equal(t, expected_Get_missing, buf.String()) @@ -901,14 +1018,69 @@ func TestTrace(t *testing.T) { buf.Reset() store.GetKVStore(skey).Delete(key) require.Equal(t, expected_Delete, buf.String()) - } + store.SetTracer(nil) require.False(t, store.TracingEnabled()) require.NoError(t, store.Close()) } +func TestTraceConcurrency(t *testing.T) { + doTestTraceConcurrency(t, multistoreConstructor) +} + +func doTestTraceConcurrency(t *testing.T, ctor storeConstructor) { + db := memdb.NewDB() + opts := storeParams123(t) + store, err := ctor(db, opts) + require.NoError(t, err) + + b := &bytes.Buffer{} + tc := types.TraceContext(map[string]interface{}{"blockHeight": 64}) + + store.SetTracer(b) + store.SetTracingContext(tc) + + cms := store.CacheWrap() + s1 := cms.GetKVStore(skey_1) + require.NotNil(t, s1) + + stop := make(chan struct{}) + stopW := make(chan struct{}) + + go func(stop chan struct{}) { + for { + select { + case <-stop: + return + default: + s1.Set([]byte{1}, []byte{1}) + cms.Write() + } + } + }(stop) + + go func(stop chan struct{}) { + for { + select { + case <-stop: + return + default: + store.SetTracingContext(tc) + } + } + }(stopW) + + time.Sleep(1 * time.Second) + stop <- struct{}{} + stopW <- struct{}{} +} + func TestListeners(t *testing.T) { + doTestListeners(t, multistoreConstructor) +} + +func doTestListeners(t *testing.T, ctor storeConstructor) { kvPairs := []types.KVPair{ {Key: []byte{1}, Value: []byte("v1")}, {Key: []byte{2}, Value: []byte("v2")}, @@ -928,12 +1100,12 @@ func TestListeners(t *testing.T) { { key: kvPairs[1].Key, value: kvPairs[1].Value, - skey: skey_2, + skey: skey_mem1, }, { key: kvPairs[2].Key, value: kvPairs[2].Value, - skey: skey_3, + skey: skey_tran1, }, } @@ -941,11 +1113,11 @@ func TestListeners(t *testing.T) { marshaller := codec.NewProtoCodec(interfaceRegistry) db := memdb.NewDB() - opts := simpleStoreConfig(t) - require.NoError(t, opts.RegisterSubstore(skey_2.Name(), types.StoreTypeMemory)) - 
require.NoError(t, opts.RegisterSubstore(skey_3.Name(), types.StoreTypeTransient)) + opts := storeParams1(t) + require.NoError(t, opts.RegisterSubstore(skey_mem1, types.StoreTypeMemory)) + require.NoError(t, opts.RegisterSubstore(skey_tran1, types.StoreTypeTransient)) - store, err := NewStore(db, opts) + store, err := ctor(db, opts) require.NoError(t, err) for i, tc := range testCases { diff --git a/store/v2alpha1/multi/sub_store.go b/store/v2alpha1/multi/sub_store.go index 33df955b920c..c62b107a907b 100644 --- a/store/v2alpha1/multi/sub_store.go +++ b/store/v2alpha1/multi/sub_store.go @@ -1,16 +1,13 @@ package multi import ( - "crypto/sha256" "io" - "sync" - dbm "github.com/cosmos/cosmos-sdk/db" dbutil "github.com/cosmos/cosmos-sdk/internal/db" "github.com/cosmos/cosmos-sdk/store/cachekv" "github.com/cosmos/cosmos-sdk/store/listenkv" "github.com/cosmos/cosmos-sdk/store/tracekv" - "github.com/cosmos/cosmos-sdk/store/types" + types "github.com/cosmos/cosmos-sdk/store/v2alpha1" ) // Get implements KVStore. @@ -47,47 +44,24 @@ func (s *substore) Set(key, value []byte) { panic(err) } s.stateCommitmentStore.Set(key, value) - khash := sha256.Sum256(key) - err = s.indexBucket.Set(khash[:], key) - if err != nil { - panic(err) - } } // Delete implements KVStore. func (s *substore) Delete(key []byte) { - khash := sha256.Sum256(key) s.root.mtx.Lock() defer s.root.mtx.Unlock() s.stateCommitmentStore.Delete(key) - _ = s.indexBucket.Delete(khash[:]) _ = s.dataBucket.Delete(key) } -type contentsIterator struct { - types.Iterator - locker sync.Locker -} - -func (s *substore) newSubstoreIterator(source dbm.Iterator) *contentsIterator { - locker := s.root.mtx.RLocker() - locker.Lock() - return &contentsIterator{dbutil.ToStoreIterator(source), locker} -} - -func (it *contentsIterator) Close() error { - defer it.locker.Unlock() - return it.Iterator.Close() -} - // Iterator implements KVStore. func (s *substore) Iterator(start, end []byte) types.Iterator { iter, err := s.dataBucket.Iterator(start, end) if err != nil { panic(err) } - return s.newSubstoreIterator(iter) + return dbutil.ToStoreIterator(iter) } // ReverseIterator implements KVStore. @@ -96,7 +70,7 @@ func (s *substore) ReverseIterator(start, end []byte) types.Iterator { if err != nil { panic(err) } - return s.newSubstoreIterator(iter) + return dbutil.ToStoreIterator(iter) } // GetStoreType implements Store. 
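For reference (not part of the patch): a minimal sketch of the v2 multistore usage pattern the tests above exercise, where the substore schema is declared up front via StoreParams instead of mounting stores on a CommitMultiStore. Only identifiers that appear in this diff are used (memdb, DefaultStoreParams, RegisterSubstore, NewStore, GetVersion); the package clause, function name, and the "example" key name are illustrative.

package main // hypothetical consumer package

import (
	"github.com/cosmos/cosmos-sdk/db/memdb"
	types "github.com/cosmos/cosmos-sdk/store/v2alpha1"
	"github.com/cosmos/cosmos-sdk/store/v2alpha1/multi"
)

func exampleV2Store() error {
	// Declare the substore schema before opening the store.
	skey := types.NewKVStoreKey("example") // hypothetical store key name
	params := multi.DefaultStoreParams()
	if err := params.RegisterSubstore(skey, types.StoreTypePersistent); err != nil {
		return err
	}

	store, err := multi.NewStore(memdb.NewDB(), params)
	if err != nil {
		return err
	}
	defer store.Close()

	// Write through the substore and commit a version.
	store.GetKVStore(skey).Set([]byte("k"), []byte("v"))
	cid := store.Commit()

	// Read-only access to a committed version.
	view, err := store.GetVersion(cid.Version)
	if err != nil {
		return err
	}
	_ = view.GetKVStore(skey).Get([]byte("k"))
	return nil
}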
diff --git a/store/v2alpha1/multi/v1asv2.go b/store/v2alpha1/multi/v1asv2.go new file mode 100644 index 000000000000..ee25df37c4a9 --- /dev/null +++ b/store/v2alpha1/multi/v1asv2.go @@ -0,0 +1,167 @@ +package multi + +import ( + "io" + + "github.com/tendermint/tendermint/libs/log" + + "github.com/cosmos/cosmos-sdk/db" + "github.com/cosmos/cosmos-sdk/db/memdb" + dbutil "github.com/cosmos/cosmos-sdk/internal/db" + "github.com/cosmos/cosmos-sdk/pruning" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + v1 "github.com/cosmos/cosmos-sdk/store/types" + v2 "github.com/cosmos/cosmos-sdk/store/v2alpha1" +) + +var ( + _ v2.CommitMultiStore = (*store1as2)(nil) + _ v2.Queryable = (*store1as2)(nil) + _ v2.CacheMultiStore = cacheStore1as2{} +) + +type store1as2 struct { + *rootmulti.Store + database *dbutil.TmdbConnAdapter + // Mirror the pruning state in rootmulti.Store + pruner *pruning.Manager + pruneDB *memdb.MemDB +} + +type cacheStore1as2 struct { + v1.CacheMultiStore +} + +type viewStore1as2 struct{ cacheStore1as2 } + +type readonlyKVStore struct { + v2.KVStore +} + +// NewV1MultiStoreAsV2 constructs a v1 CommitMultiStore from v2.StoreParams +func NewV1MultiStoreAsV2(database db.Connection, opts StoreParams) (v2.CommitMultiStore, error) { + adapter := dbutil.ConnectionAsTmdb(database) + cms := rootmulti.NewStore(adapter, log.NewNopLogger()) + for name, typ := range opts.StoreSchema { + switch typ { + case v2.StoreTypePersistent: + typ = v1.StoreTypeIAVL + } + skey, err := opts.storeKey(name) + if err != nil { + return nil, err + } + cms.MountStoreWithDB(skey, typ, nil) + } + cms.SetPruning(opts.Pruning) + pruner := pruning.NewManager() + pruner.SetOptions(opts.Pruning) + pruner.LoadPruningHeights(adapter) + + err := cms.SetInitialVersion(int64(opts.InitialVersion)) + if err != nil { + return nil, err + } + err = cms.LoadLatestVersionAndUpgrade(opts.Upgrades) + if err != nil { + return nil, err + } + for skey, ls := range opts.listeners { + cms.AddListeners(skey, ls) + } + cms.SetTracer(opts.TraceWriter) + cms.SetTracingContext(opts.TraceContext) + return &store1as2{ + Store: cms, + database: adapter, + pruner: pruner, + pruneDB: memdb.NewDB(), + }, nil +} + +// MultiStore + +func (s *store1as2) CacheWrap() v2.CacheMultiStore { + return cacheStore1as2{s.CacheMultiStore()} +} + +func (s *store1as2) GetVersion(ver int64) (v2.MultiStore, error) { + ret, err := s.CacheMultiStoreWithVersion(ver) + versions, err := s.database.Connection.Versions() + if err != nil { + return nil, err + } + if !versions.Exists(uint64(ver)) { + return nil, db.ErrVersionDoesNotExist + } + return viewStore1as2{cacheStore1as2{ret}}, err +} + +func (s *store1as2) HasKVStore(skey v2.StoreKey) bool { + return s.Store.GetCommitKVStore(skey) != nil +} + +// CommitMultiStore + +func (s *store1as2) Close() error { + return s.database.CloseTx() +} + +func (s *store1as2) Commit() v2.CommitID { + cid := s.Store.Commit() + _, err := s.database.Commit() + if err != nil { + panic(err) + } + + db := dbutil.ConnectionAsTmdb(s.pruneDB) + s.pruner.HandleHeight(cid.Version-1, db) + if !s.pruner.ShouldPruneAtHeight(cid.Version) { + return cid + } + pruningHeights, err := s.pruner.GetFlushAndResetPruningHeights(db) + if err != nil { + panic(err) + } + pruneVersions(pruningHeights, func(ver int64) error { + return s.database.Connection.DeleteVersion(uint64(ver)) + }) + return cid +} + +func (s *store1as2) SetInitialVersion(ver uint64) error { + return s.Store.SetInitialVersion(int64(ver)) +} + +func (s *store1as2) SetTracer(w io.Writer) { 
s.Store.SetTracer(w) } +func (s *store1as2) SetTracingContext(tc v2.TraceContext) { s.Store.SetTracingContext(tc) } + +func (s *store1as2) GetAllVersions() []int { panic("unsupported: GetAllVersions") } + +// CacheMultiStore + +func (s cacheStore1as2) CacheWrap() v2.CacheMultiStore { + return cacheStore1as2{s.CacheMultiStore.CacheMultiStore()} +} + +func (s cacheStore1as2) SetTracer(w io.Writer) { s.CacheMultiStore.SetTracer(w) } +func (s cacheStore1as2) SetTracingContext(tc v2.TraceContext) { + s.CacheMultiStore.SetTracingContext(tc) +} + +func (s cacheStore1as2) HasKVStore(skey v2.StoreKey) bool { + return s.CacheMultiStore.GetKVStore(skey) != nil +} + +func (s viewStore1as2) GetKVStore(skey v2.StoreKey) v2.KVStore { + sub := s.CacheMultiStore.GetKVStore(skey) + return readonlyKVStore{sub} +} + +func (kv readonlyKVStore) Set(key []byte, value []byte) { + panic(ErrReadOnly) +} + +func (kv readonlyKVStore) Delete(key []byte) { + panic(ErrReadOnly) +} diff --git a/store/v2alpha1/multi/v1asv2_test.go b/store/v2alpha1/multi/v1asv2_test.go new file mode 100644 index 000000000000..9e1c057712a0 --- /dev/null +++ b/store/v2alpha1/multi/v1asv2_test.go @@ -0,0 +1,29 @@ +package multi + +import ( + "testing" +) + +func TestV1asV2MultiStoreBasic(t *testing.T) { + doTestMultiStoreBasic(t, NewV1MultiStoreAsV2) +} + +func TestV1asV2GetVersion(t *testing.T) { + doTestGetVersion(t, NewV1MultiStoreAsV2) +} + +func TestV1asV2Pruning(t *testing.T) { + doTestPruning(t, NewV1MultiStoreAsV2, false) +} + +func TestV1asV2Trace(t *testing.T) { + doTestTrace(t, NewV1MultiStoreAsV2) +} + +func TestV1asV2TraceConcurrency(t *testing.T) { + doTestTraceConcurrency(t, NewV1MultiStoreAsV2) +} + +func TestV1asV2Listeners(t *testing.T) { + doTestListeners(t, NewV1MultiStoreAsV2) +} diff --git a/store/v2alpha1/multi/view_store.go b/store/v2alpha1/multi/view_store.go index 8117a1c41148..9420ccc68b92 100644 --- a/store/v2alpha1/multi/view_store.go +++ b/store/v2alpha1/multi/view_store.go @@ -17,6 +17,65 @@ import ( var ErrReadOnly = errors.New("cannot modify read-only store") +// Read-only store for querying past versions +type viewStore struct { + stateView dbm.Reader + stateCommitmentView dbm.Reader + substoreCache map[string]*viewSubstore + schema StoreSchema +} + +type viewSubstore struct { + root *viewStore + name string + dataBucket dbm.Reader + stateCommitmentStore *smt.Store +} + +func (s *viewStore) GetKVStore(skey types.StoreKey) types.KVStore { + key := skey.Name() + if _, has := s.schema[key]; !has { + panic(ErrStoreNotFound(key)) + } + ret, err := s.getSubstore(key) + if err != nil { + panic(err) + } + s.substoreCache[key] = ret + return ret +} + +func (vs *viewStore) HasKVStore(skey types.StoreKey) bool { + _, has := vs.schema[skey.Name()] + return has +} + +// Reads but does not update substore cache +func (s *viewStore) getSubstore(key string) (*viewSubstore, error) { + if cached, has := s.substoreCache[key]; has { + return cached, nil + } + pfx := prefixSubstore(key) + stateR := prefixdb.NewReader(s.stateView, pfx) + stateCommitmentR := prefixdb.NewReader(s.stateCommitmentView, pfx) + rootHash, err := stateR.Get(merkleRootKey) + if err != nil { + return nil, err + } + return &viewSubstore{ + root: s, + name: key, + dataBucket: prefixdb.NewReader(stateR, dataPrefix), + stateCommitmentStore: loadSMT(dbm.ReaderAsReadWriter(stateCommitmentR), rootHash), + }, nil +} + +// CacheWrap implements MultiStore. +// Because this store is a read-only view, the returned store's Write operation is a no-op. 
+func (s *viewStore) CacheWrap() types.CacheMultiStore { + return noopCacheStore{newCacheStore(s)} +} + func (s *viewSubstore) GetStateCommitmentStore() *smt.Store { return s.stateCommitmentStore } @@ -142,37 +201,3 @@ func (store *Store) getView(version int64) (ret *viewStore, err error) { } return ret, err } - -func (s *viewStore) GetKVStore(skey types.StoreKey) types.KVStore { - key := skey.Name() - if _, has := s.schema[key]; !has { - panic(ErrStoreNotFound(key)) - } - ret, err := s.getSubstore(key) - if err != nil { - panic(err) - } - s.substoreCache[key] = ret - return ret -} - -// Reads but does not update substore cache -func (s *viewStore) getSubstore(key string) (*viewSubstore, error) { - if cached, has := s.substoreCache[key]; has { - return cached, nil - } - pfx := substorePrefix(key) - stateR := prefixdb.NewReader(s.stateView, pfx) - stateCommitmentR := prefixdb.NewReader(s.stateCommitmentView, pfx) - rootHash, err := stateR.Get(merkleRootKey) - if err != nil { - return nil, err - } - return &viewSubstore{ - root: s, - name: key, - dataBucket: prefixdb.NewReader(stateR, dataPrefix), - indexBucket: prefixdb.NewReader(stateR, indexPrefix), - stateCommitmentStore: loadSMT(dbm.ReaderAsReadWriter(stateCommitmentR), rootHash), - }, nil -} diff --git a/store/v2alpha1/smt/ics23_test.go b/store/v2alpha1/smt/ics23_test.go index a94cc6f0a477..5667dbe16e78 100644 --- a/store/v2alpha1/smt/ics23_test.go +++ b/store/v2alpha1/smt/ics23_test.go @@ -6,6 +6,7 @@ import ( ics23 "github.com/confio/ics23/go" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/cosmos/cosmos-sdk/db/memdb" store "github.com/cosmos/cosmos-sdk/store/v2alpha1/smt" @@ -33,7 +34,7 @@ func TestProofICS23(t *testing.T) { nonexist := proof.GetNonexist() assert.Nil(t, nonexist) exist := proof.GetExist() - assert.NotNil(t, exist) + require.NotNil(t, exist) assert.Equal(t, 0, len(exist.Path)) assert.NoError(t, exist.Verify(ics23.SmtSpec, s.Root(), path01[:], val1)) @@ -41,18 +42,18 @@ func TestProofICS23(t *testing.T) { proof, err = s.GetProofICS23(key00) // When leaf is leftmost node assert.NoError(t, err) nonexist = proof.GetNonexist() - assert.NotNil(t, nonexist) + require.NotNil(t, nonexist) assert.Nil(t, nonexist.Left) assert.Equal(t, path00[:], nonexist.Key) - assert.NotNil(t, nonexist.Right) + require.NotNil(t, nonexist.Right) assert.Equal(t, 0, len(nonexist.Right.Path)) assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path00[:])) proof, err = s.GetProofICS23(key10) // When rightmost assert.NoError(t, err) nonexist = proof.GetNonexist() - assert.NotNil(t, nonexist) - assert.NotNil(t, nonexist.Left) + require.NotNil(t, nonexist) + require.NotNil(t, nonexist.Left) assert.Equal(t, 0, len(nonexist.Left.Path)) assert.Nil(t, nonexist.Right) assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path10[:])) @@ -63,11 +64,11 @@ func TestProofICS23(t *testing.T) { proof, err = s.GetProofICS23(key10) // In between two keys assert.NoError(t, err) nonexist = proof.GetNonexist() - assert.NotNil(t, nonexist) + require.NotNil(t, nonexist) assert.Equal(t, path10[:], nonexist.Key) - assert.NotNil(t, nonexist.Left) + require.NotNil(t, nonexist.Left) assert.Equal(t, 1, len(nonexist.Left.Path)) - assert.NotNil(t, nonexist.Right) + require.NotNil(t, nonexist.Right) assert.Equal(t, 1, len(nonexist.Right.Path)) assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path10[:])) @@ -78,9 +79,9 @@ func TestProofICS23(t *testing.T) { assert.NoError(t, err) nonexist = proof.GetNonexist() 
assert.Equal(t, path10[:], nonexist.Key) - assert.NotNil(t, nonexist.Left) + require.NotNil(t, nonexist.Left) assert.Equal(t, 1, len(nonexist.Left.Path)) - assert.NotNil(t, nonexist.Right) + require.NotNil(t, nonexist.Right) assert.Equal(t, 1, len(nonexist.Right.Path)) assert.NoError(t, nonexist.Verify(ics23.SmtSpec, s.Root(), path10[:])) diff --git a/store/v2alpha1/smt/store.go b/store/v2alpha1/smt/store.go index deba9ff18b91..ea902ff1c58b 100644 --- a/store/v2alpha1/smt/store.go +++ b/store/v2alpha1/smt/store.go @@ -6,7 +6,7 @@ import ( dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/db/prefix" - "github.com/cosmos/cosmos-sdk/store/types" + types "github.com/cosmos/cosmos-sdk/store/v2alpha1" ics23 "github.com/confio/ics23/go" "github.com/lazyledger/smt" @@ -27,10 +27,10 @@ var ( errValueNil = errors.New("value is nil") ) -// Store Implements types.KVStore and CommitKVStore. +// Store Implements types.BasicKVStore. type Store struct { tree *smt.SparseMerkleTree - values dbm.ReadWriter + values dbm.Reader // Map hashed keys back to preimage preimages dbm.ReadWriter } @@ -141,3 +141,14 @@ func (ms dbMapStore) Get(key []byte) ([]byte, error) { } return val, nil } + +func (ms dbMapStore) Delete(key []byte) error { + has, err := ms.ReadWriter.Has(key) + if err != nil { + return err + } + if !has { + return &smt.InvalidKeyError{key} + } + return ms.ReadWriter.Delete(key) +} diff --git a/store/v2alpha1/smt/store_test.go b/store/v2alpha1/smt/store_test.go index 2ad50be53dda..3024343fbf95 100644 --- a/store/v2alpha1/smt/store_test.go +++ b/store/v2alpha1/smt/store_test.go @@ -19,6 +19,8 @@ func TestGetSetHasDelete(t *testing.T) { s.Delete([]byte("foo")) assert.Equal(t, false, s.Has([]byte("foo"))) + assert.NotPanics(t, func() { s.Delete([]byte("foo")) }, "Delete of nonexistent key should not panic") + assert.Panics(t, func() { s.Get(nil) }, "Get(nil key) should panic") assert.Panics(t, func() { s.Get([]byte{}) }, "Get(empty key) should panic") assert.Panics(t, func() { s.Has(nil) }, "Has(nil key) should panic") diff --git a/store/v2alpha1/types.go b/store/v2alpha1/types.go index 44c97ff4f52d..fed95acb088d 100644 --- a/store/v2alpha1/types.go +++ b/store/v2alpha1/types.go @@ -9,30 +9,26 @@ import ( // Re-export relevant original store types type ( - StoreKey = v1.StoreKey - StoreType = v1.StoreType - CommitID = v1.CommitID - StoreUpgrades = v1.StoreUpgrades - StoreRename = v1.StoreRename - Iterator = v1.Iterator - - TraceContext = v1.TraceContext - WriteListener = v1.WriteListener - - BasicKVStore = v1.BasicKVStore - KVStore = v1.KVStore - Committer = v1.Committer - CommitKVStore = v1.CommitKVStore - CacheKVStore = v1.CacheKVStore - Queryable = v1.Queryable - CacheWrap = v1.CacheWrap - + StoreKey = v1.StoreKey + StoreType = v1.StoreType + CommitID = v1.CommitID + StoreUpgrades = v1.StoreUpgrades + StoreRename = v1.StoreRename + Iterator = v1.Iterator + TraceContext = v1.TraceContext + WriteListener = v1.WriteListener + BasicKVStore = v1.BasicKVStore + KVStore = v1.KVStore + Committer = v1.Committer + CommitKVStore = v1.CommitKVStore + CacheKVStore = v1.CacheKVStore + Queryable = v1.Queryable + CacheWrap = v1.CacheWrap KVStoreKey = v1.KVStoreKey MemoryStoreKey = v1.MemoryStoreKey TransientStoreKey = v1.TransientStoreKey - - KVPair = v1.KVPair - StoreKVPair = v1.StoreKVPair + KVPair = v1.KVPair + StoreKVPair = v1.StoreKVPair ) // Re-export relevant constants, values and utility functions @@ -45,33 +41,38 @@ const ( ) var ( - NewKVStoreKey = v1.NewKVStoreKey - PrefixEndBytes = 
v1.PrefixEndBytes - KVStorePrefixIterator = v1.KVStorePrefixIterator - KVStoreReversePrefixIterator = v1.KVStoreReversePrefixIterator - - NewStoreKVPairWriteListener = v1.NewStoreKVPairWriteListener - + NewKVStoreKey = v1.NewKVStoreKey + NewMemoryStoreKey = v1.NewMemoryStoreKey + NewTransientStoreKey = v1.NewTransientStoreKey + PrefixEndBytes = v1.PrefixEndBytes + KVStorePrefixIterator = v1.KVStorePrefixIterator + KVStoreReversePrefixIterator = v1.KVStoreReversePrefixIterator + NewStoreKVPairWriteListener = v1.NewStoreKVPairWriteListener + AssertValidKey = v1.AssertValidKey + AssertValidValue = v1.AssertValidValue + CommitmentOpDecoder = v1.CommitmentOpDecoder + ProofOpFromMap = v1.ProofOpFromMap ProofOpSMTCommitment = v1.ProofOpSMTCommitment ProofOpSimpleMerkleCommitment = v1.ProofOpSimpleMerkleCommitment - - CommitmentOpDecoder = v1.CommitmentOpDecoder - ProofOpFromMap = v1.ProofOpFromMap - NewSmtCommitmentOp = v1.NewSmtCommitmentOp + NewSmtCommitmentOp = v1.NewSmtCommitmentOp ) -// BasicMultiStore defines a minimal interface for accessing root state. -type BasicMultiStore interface { +// MultiStore defines a minimal interface for accessing root state. +type MultiStore interface { + // Returns true iff the store key is present in the schema. + HasKVStore(StoreKey) bool // Returns a KVStore which has access only to the namespace of the StoreKey. // Panics if the key is not found in the schema. GetKVStore(StoreKey) KVStore + // Returns a branched store whose modifications are later merged back in. + CacheWrap() CacheMultiStore } // mixin interface for trace and listen methods type rootStoreTraceListen interface { TracingEnabled() bool SetTracer(w io.Writer) - SetTraceContext(TraceContext) + SetTracingContext(TraceContext) ListeningEnabled(key StoreKey) bool AddListeners(key StoreKey, listeners []WriteListener) } @@ -79,33 +80,29 @@ type rootStoreTraceListen interface { // CommitMultiStore defines a complete interface for persistent root state, including // (read-only) access to past versions, pruning, trace/listen, and state snapshots. type CommitMultiStore interface { - BasicMultiStore + MultiStore rootStoreTraceListen Committer snapshottypes.Snapshotter // Gets a read-only view of the store at a specific version. // Returns an error if the version is not found. - GetVersion(int64) (BasicMultiStore, error) + GetVersion(int64) (MultiStore, error) // Closes the store and all backing transactions. Close() error - // Returns a branched whose modifications are later merged back in. - CacheMultiStore() CacheMultiStore // Defines the minimum version number that can be saved by this store. SetInitialVersion(uint64) error } // CacheMultiStore defines a branch of the root state which can be written back to the source store. type CacheMultiStore interface { - BasicMultiStore + MultiStore rootStoreTraceListen - // Returns a branched whose modifications are later merged back in. - CacheMultiStore() CacheMultiStore // Write all cached changes back to the source store. Note: this overwrites any intervening changes. Write() } // MultiStorePersistentCache provides inter-block (persistent) caching capabilities for a CommitMultiStore. -// TODO: placeholder. 
Implement and redefine this +// TODO: placeholder, not implemented yet, nor used in store type MultiStorePersistentCache = v1.MultiStorePersistentCache diff --git a/store/v2alpha1/utils.go b/store/v2alpha1/utils.go new file mode 100644 index 000000000000..7cd2f5ca9133 --- /dev/null +++ b/store/v2alpha1/utils.go @@ -0,0 +1,19 @@ +package types + +import ( + "fmt" +) + +func StoreKeyToType(key StoreKey) (typ StoreType, err error) { + switch key.(type) { + case *KVStoreKey: + typ = StoreTypePersistent + case *MemoryStoreKey: + typ = StoreTypeMemory + case *TransientStoreKey: + typ = StoreTypeTransient + default: + err = fmt.Errorf("unrecognized store key type: %T", key) + } + return +} diff --git a/testutil/context.go b/testutil/context.go index d422dd33b642..5007921be222 100644 --- a/testutil/context.go +++ b/testutil/context.go @@ -6,40 +6,51 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/store" storetypes "github.com/cosmos/cosmos-sdk/store/types" + stypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" sdk "github.com/cosmos/cosmos-sdk/types" ) // DefaultContext creates a sdk.Context with a fresh MemDB that can be used in tests. -func DefaultContext(key storetypes.StoreKey, tkey storetypes.StoreKey) sdk.Context { - db := dbm.NewMemDB() - cms := store.NewCommitMultiStore(db) - cms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db) - cms.MountStoreWithDB(tkey, storetypes.StoreTypeTransient, db) - err := cms.LoadLatestVersion() +func DefaultContext(key, tkey stypes.StoreKey) (ret sdk.Context) { + var err error + defer func() { + if err != nil { + panic(err) + } + }() + db := memdb.NewDB() + opts := multi.DefaultStoreParams() + if err = opts.RegisterSubstore(key, stypes.StoreTypePersistent); err != nil { + return + } + if err = opts.RegisterSubstore(tkey, stypes.StoreTypeTransient); err != nil { + return + } + rs, err := multi.NewV1MultiStoreAsV2(db, opts) if err != nil { - panic(err) + return } - ctx := sdk.NewContext(cms, tmproto.Header{}, false, log.NewNopLogger()) - - return ctx + ret = sdk.NewContext(rs.CacheWrap(), tmproto.Header{}, false, log.NewNopLogger()) + return } type TestContext struct { Ctx sdk.Context - DB *dbm.MemDB + DB *memdb.MemDB CMS store.CommitMultiStore } -func DefaultContextWithDB(t *testing.T, key storetypes.StoreKey, tkey storetypes.StoreKey) TestContext { - db := dbm.NewMemDB() - cms := store.NewCommitMultiStore(db) - cms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db) - cms.MountStoreWithDB(tkey, storetypes.StoreTypeTransient, db) - err := cms.LoadLatestVersion() +func DefaultContextWithDB(t *testing.T, key store.Key, tkey storetypes.StoreKey) TestContext { + db := memdb.NewDB() + opts := multi.DefaultStoreParams() + assert.NoError(t, opts.RegisterSubstore(key, stypes.StoreTypePersistent)) + assert.NoError(t, opts.RegisterSubstore(tkey, stypes.StoreTypeTransient)) + cms, err := multi.NewV1MultiStoreAsV2(db, opts) assert.NoError(t, err) ctx := sdk.NewContext(cms, tmproto.Header{}, false, log.NewNopLogger()) diff --git a/testutil/mock/param_store.go b/testutil/mock/param_store.go new file mode 100644 index 000000000000..bb544c32ab31 --- /dev/null +++ b/testutil/mock/param_store.go @@ -0,0 +1,49 @@ +package mock + +import ( + "encoding/json" + + "github.com/cosmos/cosmos-sdk/db" + sdk 
"github.com/cosmos/cosmos-sdk/types" +) + +type ParamStore struct { + Txn db.ReadWriter +} + +func NewParamStore(db db.Connection) *ParamStore { + return &ParamStore{Txn: db.ReadWriter()} +} + +func (ps *ParamStore) Set(_ sdk.Context, key []byte, value interface{}) { + bz, err := json.Marshal(value) + if err != nil { + panic(err) + } + + ps.Txn.Set(key, bz) +} + +func (ps *ParamStore) Has(_ sdk.Context, key []byte) bool { + ok, err := ps.Txn.Has(key) + if err != nil { + panic(err) + } + + return ok +} + +func (ps *ParamStore) Get(_ sdk.Context, key []byte, ptr interface{}) { + bz, err := ps.Txn.Get(key) + if err != nil { + panic(err) + } + + if len(bz) == 0 { + return + } + + if err := json.Unmarshal(bz, ptr); err != nil { + panic(err) + } +} diff --git a/testutil/network/network.go b/testutil/network/network.go index 0939f83ab057..7b49ab9da5af 100644 --- a/testutil/network/network.go +++ b/testutil/network/network.go @@ -21,7 +21,6 @@ import ( tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" tmclient "github.com/tendermint/tendermint/rpc/client" - dbm "github.com/tendermint/tm-db" "google.golang.org/grpc" "cosmossdk.io/math" @@ -38,6 +37,7 @@ import ( "github.com/cosmos/cosmos-sdk/crypto/hd" "github.com/cosmos/cosmos-sdk/crypto/keyring" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + "github.com/cosmos/cosmos-sdk/db" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/runtime" "github.com/cosmos/cosmos-sdk/server" @@ -177,7 +177,7 @@ func DefaultConfigWithAppConfig(appConfig depinject.Config) (Config, error) { } app := appBuilder.Build( val.GetCtx().Logger, - dbm.NewMemDB(), + db.NewMemDB(), nil, baseapp.SetPruning(pruningtypes.NewPruningOptionsFromString(val.GetAppConfig().Pruning)), baseapp.SetMinGasPrices(val.GetAppConfig().MinGasPrices), @@ -185,7 +185,7 @@ func DefaultConfigWithAppConfig(appConfig depinject.Config) (Config, error) { testdata.RegisterQueryServer(app.GRPCQueryRouter(), testdata.QueryImpl{}) - if err := app.Load(true); err != nil { + if err := app.Load(); err != nil { panic(err) } diff --git a/testutil/network/util.go b/testutil/network/util.go index e3ace5544445..3d0b4d83cf24 100644 --- a/testutil/network/util.go +++ b/testutil/network/util.go @@ -34,6 +34,7 @@ func startInProcess(cfg Config, val *Validator) error { } app := cfg.AppConstructor(*val) + // app.SetParamStore() genDoc, err := types.GenesisDocFromFile(tmCfg.GenesisFile()) if err != nil { diff --git a/testutil/sims/app_helpers.go b/testutil/sims/app_helpers.go index 4a689324b532..5c3f7597cb8e 100644 --- a/testutil/sims/app_helpers.go +++ b/testutil/sims/app_helpers.go @@ -11,14 +11,15 @@ import ( "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" tmtypes "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" "cosmossdk.io/depinject" + "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/client/flags" "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" + dbm "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/runtime" servertypes "github.com/cosmos/cosmos-sdk/server/types" "github.com/cosmos/cosmos-sdk/testutil/mock" @@ -107,13 +108,14 @@ func SetupWithConfiguration(appConfig depinject.Config, validatorSet func() (*tm } if baseAppOption != nil { - app = 
appBuilder.Build(log.NewNopLogger(), dbm.NewMemDB(), nil, baseAppOption) + app = appBuilder.Build(log.NewNopLogger(), dbm.NewMemDB(), nil, baseapp.AppOptionFunc(baseAppOption)) } else { app = appBuilder.Build(log.NewNopLogger(), dbm.NewMemDB(), nil) } - if err := app.Load(true); err != nil { + if err := app.Load(); err != nil { return nil, fmt.Errorf("failed to load app: %w", err) } + app.SetParamStore(mock.NewParamStore(dbm.NewMemDB())) // create validator set valSet, err := validatorSet() diff --git a/types/context.go b/types/context.go index ec5f2c3774dd..38f84843cc1d 100644 --- a/types/context.go +++ b/types/context.go @@ -11,7 +11,8 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/cosmos/cosmos-sdk/store/gaskv" - storetypes "github.com/cosmos/cosmos-sdk/store/types" + stypes "github.com/cosmos/cosmos-sdk/store/types" + stypes2 "github.com/cosmos/cosmos-sdk/store/v2alpha1" ) /* @@ -24,7 +25,7 @@ and standard additions here would be better just to add to the Context struct */ type Context struct { baseCtx context.Context - ms MultiStore + store stypes2.MultiStore header tmproto.Header headerHash tmbytes.HexBytes chainID string @@ -45,21 +46,21 @@ type Context struct { type Request = Context // Read-only accessors -func (c Context) Context() context.Context { return c.baseCtx } -func (c Context) MultiStore() MultiStore { return c.ms } -func (c Context) BlockHeight() int64 { return c.header.Height } -func (c Context) BlockTime() time.Time { return c.header.Time } -func (c Context) ChainID() string { return c.chainID } -func (c Context) TxBytes() []byte { return c.txBytes } -func (c Context) Logger() log.Logger { return c.logger } -func (c Context) VoteInfos() []abci.VoteInfo { return c.voteInfo } -func (c Context) GasMeter() GasMeter { return c.gasMeter } -func (c Context) BlockGasMeter() GasMeter { return c.blockGasMeter } -func (c Context) IsCheckTx() bool { return c.checkTx } -func (c Context) IsReCheckTx() bool { return c.recheckTx } -func (c Context) MinGasPrices() DecCoins { return c.minGasPrice } -func (c Context) EventManager() *EventManager { return c.eventManager } -func (c Context) Priority() int64 { return c.priority } +func (c Context) Context() context.Context { return c.baseCtx } +func (c Context) MultiStore() stypes2.MultiStore { return c.store } +func (c Context) BlockHeight() int64 { return c.header.Height } +func (c Context) BlockTime() time.Time { return c.header.Time } +func (c Context) ChainID() string { return c.chainID } +func (c Context) TxBytes() []byte { return c.txBytes } +func (c Context) Logger() log.Logger { return c.logger } +func (c Context) VoteInfos() []abci.VoteInfo { return c.voteInfo } +func (c Context) GasMeter() GasMeter { return c.gasMeter } +func (c Context) BlockGasMeter() GasMeter { return c.blockGasMeter } +func (c Context) IsCheckTx() bool { return c.checkTx } +func (c Context) IsReCheckTx() bool { return c.recheckTx } +func (c Context) MinGasPrices() DecCoins { return c.minGasPrice } +func (c Context) EventManager() *EventManager { return c.eventManager } +func (c Context) Priority() int64 { return c.priority } // clone the header before returning func (c Context) BlockHeader() tmproto.Header { @@ -91,17 +92,17 @@ func (c Context) Err() error { } // create a new context -func NewContext(ms MultiStore, header tmproto.Header, isCheckTx bool, logger log.Logger) Context { +func NewContext(ms stypes2.MultiStore, header tmproto.Header, isCheckTx bool, logger log.Logger) Context { // 
https://github.com/gogo/protobuf/issues/519 header.Time = header.Time.UTC() return Context{ baseCtx: context.Background(), - ms: ms, + store: ms, header: header, chainID: header.ChainID, checkTx: isCheckTx, logger: logger, - gasMeter: storetypes.NewInfiniteGasMeter(), + gasMeter: stypes.NewInfiniteGasMeter(), minGasPrice: DecCoins{}, eventManager: NewEventManager(), } @@ -114,8 +115,8 @@ func (c Context) WithContext(ctx context.Context) Context { } // WithMultiStore returns a Context with an updated MultiStore. -func (c Context) WithMultiStore(ms MultiStore) Context { - c.ms = ms +func (c Context) WithMultiStore(ms stypes2.MultiStore) Context { + c.store = ms return c } @@ -236,7 +237,7 @@ func (c Context) WithPriority(p int64) Context { // TODO: remove??? func (c Context) IsZero() bool { - return c.ms == nil + return c.store == nil } func (c Context) WithValue(key, value interface{}) Context { @@ -257,22 +258,22 @@ func (c Context) Value(key interface{}) interface{} { // ---------------------------------------------------------------------------- // KVStore fetches a KVStore from the MultiStore. -func (c Context) KVStore(key storetypes.StoreKey) KVStore { - return gaskv.NewStore(c.MultiStore().GetKVStore(key), c.GasMeter(), storetypes.KVGasConfig()) +func (c Context) KVStore(key stypes.StoreKey) stypes.KVStore { + return gaskv.NewStore(c.MultiStore().GetKVStore(key), c.GasMeter(), stypes.KVGasConfig()) } // TransientStore fetches a TransientStore from the MultiStore. -func (c Context) TransientStore(key storetypes.StoreKey) KVStore { - return gaskv.NewStore(c.MultiStore().GetKVStore(key), c.GasMeter(), storetypes.TransientGasConfig()) +func (c Context) TransientStore(key stypes.StoreKey) stypes.KVStore { + return gaskv.NewStore(c.MultiStore().GetKVStore(key), c.GasMeter(), stypes.TransientGasConfig()) } // CacheContext returns a new Context with the multi-store cached and a new // EventManager. The cached context is written to the context when writeCache // is called. func (c Context) CacheContext() (cc Context, writeCache func()) { - cms := c.MultiStore().CacheMultiStore() - cc = c.WithMultiStore(cms).WithEventManager(NewEventManager()) - return cc, cms.Write + cs := c.MultiStore().CacheWrap() + cc = c.WithMultiStore(cs).WithEventManager(NewEventManager()) + return cc, cs.Write } var _ context.Context = Context{} diff --git a/types/store.go b/types/store.go index 274d4f9c2c2b..1de65928024e 100644 --- a/types/store.go +++ b/types/store.go @@ -6,20 +6,18 @@ import ( "strings" "github.com/cosmos/cosmos-sdk/store/types" + types2 "github.com/cosmos/cosmos-sdk/store/v2alpha1" "github.com/cosmos/cosmos-sdk/types/kv" ) type ( - Store = types.Store - Committer = types.Committer - CommitStore = types.CommitStore - Queryable = types.Queryable - MultiStore = types.MultiStore - CacheMultiStore = types.CacheMultiStore - CommitMultiStore = types.CommitMultiStore - MultiStorePersistentCache = types.MultiStorePersistentCache - KVStore = types.KVStore - Iterator = types.Iterator + Queryable = types2.Queryable + KVStore = types2.KVStore + Iterator = types2.Iterator + MultiStore = types2.MultiStore + CommitMultiStore = types2.CommitMultiStore + CacheMultiStore = types2.CacheMultiStore + MultiStorePersistentCache = types2.MultiStorePersistentCache ) // StoreDecoderRegistry defines each of the modules store decoders. 
Used for ImportExport diff --git a/types/utils.go b/types/utils.go index efd9a9c71a81..d15c125fbddc 100644 --- a/types/utils.go +++ b/types/utils.go @@ -3,15 +3,14 @@ package types import ( "encoding/binary" "encoding/json" - "fmt" "time" + "github.com/cosmos/cosmos-sdk/db" "github.com/cosmos/cosmos-sdk/types/kv" - dbm "github.com/tendermint/tm-db" ) -// This is set at compile time. Could be cleveldb, defaults is goleveldb. -var backend = dbm.GoLevelDBBackend +// This is set at compile time. Could be memdb, badgerdb, rocksdb. Default is badgerdb. +var backend = db.BadgerDBBackend // SortedJSON takes any JSON and returns it sorted by keys. Also, all white-spaces // are removed. @@ -76,19 +75,6 @@ func ParseTimeBytes(bz []byte) (time.Time, error) { return t.UTC().Round(0), nil } -// NewLevelDB instantiate a new LevelDB instance according to DBBackend. -// -// Deprecated: Use NewDB (from "github.com/tendermint/tm-db") instead. Suggested backendType is tendermint config's DBBackend value. -func NewLevelDB(name, dir string) (db dbm.DB, err error) { - defer func() { - if r := recover(); r != nil { - err = fmt.Errorf("couldn't create db: %v", r) - } - }() - - return dbm.NewDB(name, backend, dir) -} - // copy bytes func CopyBytes(bz []byte) (ret []byte) { if bz == nil { diff --git a/x/capability/capability_test.go b/x/capability/capability_test.go index ce14eb4d321f..6e974b21c9dc 100644 --- a/x/capability/capability_test.go +++ b/x/capability/capability_test.go @@ -7,7 +7,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/cosmos/cosmos-sdk/baseapp" + ba "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/runtime" storetypes "github.com/cosmos/cosmos-sdk/store/types" @@ -34,10 +34,7 @@ func (suite *CapabilityTestSuite) SetupTest() { suite.memKey = storetypes.NewMemoryStoreKey("testingkey") app, err := simtestutil.SetupWithBaseAppOption(testutil.AppConfig, - func(ba *baseapp.BaseApp) { - ba.MountStores(suite.memKey) - }, - &suite.cdc, + ba.SetSubstores(suite.memKey).Apply, &suite.cdc, &suite.keeper, ) suite.Require().NoError(err) diff --git a/x/capability/keeper/keeper_test.go b/x/capability/keeper/keeper_test.go index c859032013bc..7049d242163e 100644 --- a/x/capability/keeper/keeper_test.go +++ b/x/capability/keeper/keeper_test.go @@ -277,7 +277,7 @@ func (suite KeeperTestSuite) TestRevertCapability() { ms := suite.ctx.MultiStore() - msCache := ms.CacheMultiStore() + msCache := ms.CacheWrap() cacheCtx := suite.ctx.WithMultiStore(msCache) capName := "revert" diff --git a/x/gov/genesis_test.go b/x/gov/genesis_test.go index 518ad8d8a9f0..ac67b842e22e 100644 --- a/x/gov/genesis_test.go +++ b/x/gov/genesis_test.go @@ -8,8 +8,8 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/simapp" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" sdk "github.com/cosmos/cosmos-sdk/types" @@ -73,8 +73,8 @@ func TestImportExportQueues(t *testing.T) { panic(err) } - db := dbm.NewMemDB() - app2 := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, simapp.MakeTestEncodingConfig(), simtestutil.NewAppOptionsWithFlagHome(simapp.DefaultNodeHome)) + db := memdb.NewDB() + app2 := simapp.NewSimApp(log.NewNopLogger(), db, nil, 
simapp.MakeTestEncodingConfig(), simtestutil.NewAppOptionsWithFlagHome(simapp.DefaultNodeHome)) app2.InitChain( abci.RequestInitChain{ diff --git a/x/gov/module_test.go b/x/gov/module_test.go index f9c7c97137b6..43a17fb06d22 100644 --- a/x/gov/module_test.go +++ b/x/gov/module_test.go @@ -8,9 +8,9 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/server" "github.com/cosmos/cosmos-sdk/simapp" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" @@ -19,14 +19,14 @@ import ( ) func TestItCreatesModuleAccountOnInitBlock(t *testing.T) { - db := dbm.NewMemDB() + db := memdb.NewDB() encCdc := simapp.MakeTestEncodingConfig() appOptions := make(simtestutil.AppOptionsMap, 0) appOptions[flags.FlagHome] = simapp.DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = 5 - app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, encCdc, appOptions) + app := simapp.NewSimApp(log.NewNopLogger(), db, nil, encCdc, appOptions) genesisState := simapp.GenesisStateWithSingleValidator(t, app) stateBytes, err := tmjson.Marshal(genesisState) diff --git a/x/group/internal/orm/testsupport.go b/x/group/internal/orm/testsupport.go index 1b4a6847898a..e041c12c1d35 100644 --- a/x/group/internal/orm/testsupport.go +++ b/x/group/internal/orm/testsupport.go @@ -3,50 +3,64 @@ package orm import ( "fmt" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/store" "github.com/cosmos/cosmos-sdk/store/gaskv" storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" sdk "github.com/cosmos/cosmos-sdk/types" - dbm "github.com/tendermint/tm-db" ) type MockContext struct { - db *dbm.MemDB - store storetypes.CommitMultiStore + db *memdb.MemDB + store store.CommitMultiStore + params multi.StoreParams } func NewMockContext() *MockContext { - db := dbm.NewMemDB() + db := memdb.NewDB() + params := multi.DefaultStoreParams() + cms, err := multi.NewStore(db, params) + if err != nil { + panic(err) + } return &MockContext{ - db: dbm.NewMemDB(), - store: store.NewCommitMultiStore(db), + db: db, + store: cms, + params: params, } } -func (m MockContext) KVStore(key storetypes.StoreKey) sdk.KVStore { - if s := m.store.GetCommitKVStore(key); s != nil { - return s +func (m *MockContext) KVStore(key store.Key) sdk.KVStore { + if m.store.HasKVStore(key) { + return m.store.GetKVStore(key) + } + err := m.store.Close() + if err != nil { + panic(err) + } + if err = m.params.RegisterSubstore(key, storetypes.StoreTypePersistent); err != nil { + panic(err) } - m.store.MountStoreWithDB(key, storetypes.StoreTypeIAVL, m.db) - if err := m.store.LoadLatestVersion(); err != nil { + if m.store, err = multi.NewStore(m.db, m.params); err != nil { panic(err) } - return m.store.GetCommitKVStore(key) + return m.store.GetKVStore(key) } type debuggingGasMeter struct { - g storetypes.GasMeter + g store.GasMeter } -func (d debuggingGasMeter) GasConsumed() storetypes.Gas { +func (d debuggingGasMeter) GasConsumed() store.Gas { return d.g.GasConsumed() } -func (d debuggingGasMeter) GasRemaining() storetypes.Gas { +func (d debuggingGasMeter) GasRemaining() store.Gas { return d.g.GasRemaining() } -func (d debuggingGasMeter) GasConsumedToLimit() storetypes.Gas { +func (d debuggingGasMeter) GasConsumedToLimit() store.Gas { return 
d.g.GasConsumedToLimit() } @@ -54,11 +68,11 @@ func (d debuggingGasMeter) RefundGas(amount uint64, descriptor string) { d.g.RefundGas(amount, descriptor) } -func (d debuggingGasMeter) Limit() storetypes.Gas { +func (d debuggingGasMeter) Limit() store.Gas { return d.g.Limit() } -func (d debuggingGasMeter) ConsumeGas(amount storetypes.Gas, descriptor string) { +func (d debuggingGasMeter) ConsumeGas(amount store.Gas, descriptor string) { fmt.Printf("++ Consuming gas: %q :%d\n", descriptor, amount) d.g.ConsumeGas(amount, descriptor) } @@ -89,11 +103,11 @@ func (g GasCountingMockContext) KVStore(store sdk.KVStore) sdk.KVStore { return gaskv.NewStore(store, g.GasMeter, storetypes.KVGasConfig()) } -func (g GasCountingMockContext) GasConsumed() storetypes.Gas { +func (g GasCountingMockContext) GasConsumed() store.Gas { return g.GasMeter.GasConsumed() } -func (g GasCountingMockContext) GasRemaining() storetypes.Gas { +func (g GasCountingMockContext) GasRemaining() store.Gas { return g.GasMeter.GasRemaining() } diff --git a/x/group/keeper/invariants_test.go b/x/group/keeper/invariants_test.go index 013f5e3a651b..9a7ea308e118 100644 --- a/x/group/keeper/invariants_test.go +++ b/x/group/keeper/invariants_test.go @@ -7,13 +7,12 @@ import ( "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/store" - - storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/db/memdb" + storetypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/group" @@ -38,11 +37,12 @@ func (s *invariantTestSuite) SetupSuite() { group.RegisterInterfaces(interfaceRegistry) cdc := codec.NewProtoCodec(interfaceRegistry) key := sdk.NewKVStoreKey(group.ModuleName) - db := dbm.NewMemDB() - cms := store.NewCommitMultiStore(db) - cms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db) - _ = cms.LoadLatestVersion() - sdkCtx := sdk.NewContext(cms, tmproto.Header{}, false, log.NewNopLogger()) + db := memdb.NewDB() + config := multi.DefaultStoreParams() + s.Require().NoError(config.RegisterSubstore(key, storetypes.StoreTypePersistent)) + ms, err := multi.NewV1MultiStoreAsV2(db, config) + s.Require().NoError(err) + sdkCtx := sdk.NewContext(ms, tmproto.Header{}, false, log.NewNopLogger()) s.ctx = sdkCtx s.cdc = cdc diff --git a/x/params/types/subspace_test.go b/x/params/types/subspace_test.go index 63874489dc6e..ef5442239e50 100644 --- a/x/params/types/subspace_test.go +++ b/x/params/types/subspace_test.go @@ -9,12 +9,13 @@ import ( "github.com/stretchr/testify/suite" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "cosmossdk.io/depinject" "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/store" + "github.com/cosmos/cosmos-sdk/db/memdb" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/params/testutil" "github.com/cosmos/cosmos-sdk/x/params/types" @@ -30,14 +31,15 @@ type SubspaceTestSuite struct { } func (suite *SubspaceTestSuite) SetupTest() { - db := dbm.NewMemDB() + db := memdb.NewDB() - ms := 
store.NewCommitMultiStore(db) - ms.MountStoreWithDB(key, storetypes.StoreTypeIAVL, db) - ms.MountStoreWithDB(tkey, storetypes.StoreTypeTransient, db) - suite.NoError(ms.LoadLatestVersion()) + config := multi.DefaultStoreParams() + suite.NoError(config.RegisterSubstore(key, storetypes.StoreTypePersistent)) + suite.NoError(config.RegisterSubstore(tkey, storetypes.StoreTypeTransient)) + ms, err := multi.NewV1MultiStoreAsV2(db, config) + suite.NoError(err) - err := depinject.Inject(testutil.AppConfig, + err = depinject.Inject(testutil.AppConfig, &suite.cdc, &suite.amino, ) diff --git a/x/simulation/log.go b/x/simulation/log.go index b22709b65414..8d775f87b70b 100644 --- a/x/simulation/log.go +++ b/x/simulation/log.go @@ -62,7 +62,7 @@ func createLogFile() *os.File { if err != nil { panic(err) } - fmt.Printf("Logs to writing to %s\n", filePath) + fmt.Printf("Writing logs to %s\n", filePath) return f } diff --git a/x/slashing/keeper/signing_info.go b/x/slashing/keeper/signing_info.go index d65b773ebc66..d986db27c45d 100644 --- a/x/slashing/keeper/signing_info.go +++ b/x/slashing/keeper/signing_info.go @@ -9,7 +9,7 @@ import ( "github.com/cosmos/cosmos-sdk/x/slashing/types" ) -// GetValidatorSigningInfo retruns the ValidatorSigningInfo for a specific validator +// GetValidatorSigningInfo returns the ValidatorSigningInfo for a specific validator // ConsAddress func (k Keeper) GetValidatorSigningInfo(ctx sdk.Context, address sdk.ConsAddress) (info types.ValidatorSigningInfo, found bool) { store := ctx.KVStore(k.storeKey) diff --git a/x/staking/module_test.go b/x/staking/module_test.go index 12faab19e0c2..37e5f9310e5d 100644 --- a/x/staking/module_test.go +++ b/x/staking/module_test.go @@ -8,9 +8,9 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/db/memdb" "github.com/cosmos/cosmos-sdk/server" "github.com/cosmos/cosmos-sdk/simapp" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" @@ -19,14 +19,14 @@ import ( ) func TestItCreatesModuleAccountOnInitBlock(t *testing.T) { - db := dbm.NewMemDB() + db := memdb.NewDB() encCdc := simapp.MakeTestEncodingConfig() appOptions := make(simtestutil.AppOptionsMap, 0) appOptions[flags.FlagHome] = simapp.DefaultNodeHome appOptions[server.FlagInvCheckPeriod] = 5 - app := simapp.NewSimApp(log.NewNopLogger(), db, nil, true, encCdc, appOptions) + app := simapp.NewSimApp(log.NewNopLogger(), db, nil, encCdc, appOptions) genesisState := simapp.GenesisStateWithSingleValidator(t, app) stateBytes, err := tmjson.Marshal(genesisState) diff --git a/x/upgrade/types/storeloader.go b/x/upgrade/types/storeloader.go index 3911effbed30..6fa859206cff 100644 --- a/x/upgrade/types/storeloader.go +++ b/x/upgrade/types/storeloader.go @@ -2,22 +2,20 @@ package types import ( "github.com/cosmos/cosmos-sdk/baseapp" - storetypes "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" + storetypes "github.com/cosmos/cosmos-sdk/store/v2alpha1" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" ) -// UpgradeStoreLoader is used to prepare baseapp with a fixed StoreLoader -// pattern. This is useful for custom upgrade loading logic. 
-func UpgradeStoreLoader(upgradeHeight int64, storeUpgrades *storetypes.StoreUpgrades) baseapp.StoreLoader { - return func(ms sdk.CommitMultiStore) error { - if upgradeHeight == ms.LastCommitID().Version+1 { - // Check if the current commit version and upgrade height matches +// UpgradeStoreOption is used to prepare baseapp with a fixed StoreOption. +// This is useful for custom upgrade loading logic. +func UpgradeStoreOption(upgradeHeight uint64, storeUpgrades *storetypes.StoreUpgrades) baseapp.StoreOption { + return func(par *multi.StoreParams, loadHeight uint64) error { + // Check if the current commit version and upgrade height matches + if upgradeHeight == loadHeight+1 { if len(storeUpgrades.Renamed) > 0 || len(storeUpgrades.Deleted) > 0 || len(storeUpgrades.Added) > 0 { - return ms.LoadLatestVersionAndUpgrade(storeUpgrades) + par.Upgrades = storeUpgrades } } - - // Otherwise load default store loader - return baseapp.DefaultStoreLoader(ms) + return nil } } diff --git a/x/upgrade/types/storeloader_test.go b/x/upgrade/types/storeloader_test.go index 6aaefa69d198..2b6d7d6b9236 100644 --- a/x/upgrade/types/storeloader_test.go +++ b/x/upgrade/types/storeloader_test.go @@ -11,22 +11,16 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - dbm "github.com/tendermint/tm-db" "github.com/cosmos/cosmos-sdk/baseapp" + dbm "github.com/cosmos/cosmos-sdk/db" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" "github.com/cosmos/cosmos-sdk/server" - "github.com/cosmos/cosmos-sdk/store/rootmulti" storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/store/v2alpha1/multi" sdk "github.com/cosmos/cosmos-sdk/types" ) -func useUpgradeLoader(height int64, upgrades *storetypes.StoreUpgrades) func(*baseapp.BaseApp) { - return func(app *baseapp.BaseApp) { - app.SetStoreLoader(UpgradeStoreLoader(height, upgrades)) - } -} - func defaultLogger() log.Logger { writer := zerolog.ConsoleWriter{Out: os.Stderr} return server.ZeroLogWrapper{ @@ -34,37 +28,36 @@ func defaultLogger() log.Logger { } } -func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) +func initStore(t *testing.T, db dbm.Connection, config multi.StoreParams, key storetypes.StoreKey, k, v []byte) { + rs, err := multi.NewV1MultiStoreAsV2(db, config) + require.NoError(t, err) rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) require.Equal(t, int64(0), rs.LastCommitID().Version) // write some data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) + kv := rs.GetKVStore(key) require.NotNil(t, kv) kv.Set(k, v) commitID := rs.Commit() require.Equal(t, int64(1), commitID.Version) + require.NoError(t, rs.Close()) } -func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) +func checkStore(t *testing.T, db dbm.Connection, config multi.StoreParams, ver int64, key storetypes.StoreKey, k, v []byte) { + rs, err := multi.NewV1MultiStoreAsV2(db, config) + require.NoError(t, err) rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - 
 	require.Equal(t, ver, rs.LastCommitID().Version)
 
-	// query data in substore
-	kv, _ := rs.GetStore(key).(storetypes.KVStore)
-
-	require.NotNil(t, kv)
-	require.Equal(t, v, kv.Get(k))
+	if v != nil {
+		kv := rs.GetKVStore(key)
+		require.NotNil(t, kv)
+		require.Equal(t, v, kv.Get(k))
+	} else {
+		// v == nil indicates the substore was moved and no longer exists
+		require.Panics(t, func() { _ = rs.GetKVStore(key) })
+	}
+	require.NoError(t, rs.Close())
 }
 
 // Test that we can make commits and then reload old versions.
@@ -89,25 +82,27 @@ func TestSetLoader(t *testing.T) {
 	_, err = os.Stat(upgradeInfoFilePath)
 	require.NoError(t, err)
 
+	fooKey := sdk.NewKVStoreKey("foo")
+	barKey := sdk.NewKVStoreKey("bar")
 	cases := map[string]struct {
-		setLoader    func(*baseapp.BaseApp)
-		origStoreKey string
-		loadStoreKey string
+		setLoader    baseapp.AppOption
+		origStoreKey storetypes.StoreKey
+		loadStoreKey storetypes.StoreKey
 	}{
 		"don't set loader": {
 			setLoader:    nil,
-			origStoreKey: "foo",
-			loadStoreKey: "foo",
+			origStoreKey: fooKey,
+			loadStoreKey: fooKey,
 		},
 		"rename with inline opts": {
-			setLoader: useUpgradeLoader(upgradeHeight, &storetypes.StoreUpgrades{
+			setLoader: UpgradeStoreOption(uint64(upgradeHeight), &storetypes.StoreUpgrades{
 				Renamed: []storetypes.StoreRename{{
 					OldKey: "foo",
 					NewKey: "bar",
 				}},
 			}),
-			origStoreKey: "foo",
-			loadStoreKey: "bar",
+			origStoreKey: fooKey,
+			loadStoreKey: barKey,
		},
	}
@@ -117,47 +112,54 @@
 	for name, tc := range cases {
 		tc := tc
 		t.Run(name, func(t *testing.T) {
+			origConfig := multi.DefaultStoreParams()
+			loadConfig := multi.DefaultStoreParams()
+			require.NoError(t, origConfig.RegisterSubstore(tc.origStoreKey, storetypes.StoreTypePersistent))
+			require.NoError(t, loadConfig.RegisterSubstore(tc.loadStoreKey, storetypes.StoreTypePersistent))
+
 			// prepare a db with some data
 			db := dbm.NewMemDB()
-
-			initStore(t, db, tc.origStoreKey, k, v)
+			initStore(t, db, origConfig, tc.origStoreKey, k, v)
 
 			// load the app with the existing db
-			opts := []func(*baseapp.BaseApp){baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))}
-
+			opts := []baseapp.AppOption{
+				baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)),
+				baseapp.SetSubstores(tc.origStoreKey),
+			}
 			origapp := baseapp.NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...)
-			origapp.MountStores(sdk.NewKVStoreKey(tc.origStoreKey))
-			err := origapp.LoadLatestVersion()
-			require.Nil(t, err)
+			require.NoError(t, origapp.Init())
 
 			for i := int64(2); i <= upgradeHeight-1; i++ {
 				origapp.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: i}})
 				res := origapp.Commit()
 				require.NotNil(t, res.Data)
 			}
+			require.NoError(t, origapp.CloseStore())
 
+			// load the new app with the original app db
+			opts = []baseapp.AppOption{
+				baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)),
+				baseapp.SetSubstores(tc.loadStoreKey),
+			}
 			if tc.setLoader != nil {
 				opts = append(opts, tc.setLoader)
 			}
-
-			// load the new app with the original app db
 			app := baseapp.NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...)
-			app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey))
-			err = app.LoadLatestVersion()
-			require.Nil(t, err)
+			require.NoError(t, app.Init())
 
 			// "execute" one block
 			app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: upgradeHeight}})
 			res := app.Commit()
 			require.NotNil(t, res.Data)
+			require.NoError(t, app.CloseStore())
 
 			// checking the case of the store being renamed
 			if tc.setLoader != nil {
-				checkStore(t, db, upgradeHeight, tc.origStoreKey, k, nil)
+				checkStore(t, db, loadConfig, upgradeHeight, tc.origStoreKey, k, nil)
 			}
 
 			// check db is properly updated
-			checkStore(t, db, upgradeHeight, tc.loadStoreKey, k, v)
+			checkStore(t, db, loadConfig, upgradeHeight, tc.loadStoreKey, k, v)
 		})
 	}
 }