From 797bd12af5192c45891c82f08b38e5ef868e1593 Mon Sep 17 00:00:00 2001 From: Facundo Medica <14063057+facundomedica@users.noreply.github.com> Date: Tue, 30 Aug 2022 18:50:47 -0300 Subject: [PATCH] refactor!: Remove sdk.Router and refactor baseapp tests (#13005) * test * progress-ish * progress * progress * make mocks * progress * test * progress * progress * progress * progress * progress * progress * fix mock tests * progress * pretty much done, might need some tidying up * lint * re-enable blockgas test * remove router * gofumpt * remove more references of Router * fix * remove unused code * remove unused code Co-authored-by: Marko --- baseapp/abci_test.go | 45 + baseapp/baseapp.go | 28 - baseapp/baseapp_test.go | 2152 +------------------- baseapp/block_gas_test.go | 59 +- baseapp/deliver_tx_test.go | 2186 +++++++++++++++++++++ baseapp/options.go | 8 - baseapp/router.go | 41 - baseapp/router_test.go | 31 - baseapp/testutil/buf.gen.yaml | 5 + baseapp/testutil/buf.lock | 17 + baseapp/testutil/buf.yaml | 4 + baseapp/testutil/messages.go | 56 + baseapp/testutil/messages.pb.go | 1293 ++++++++++++ baseapp/testutil/messages.proto | 39 + client/v2/internal/testpb/query.pulsar.go | 4 +- scripts/protocgen.sh | 3 + server/mock/app.go | 55 +- server/mock/tx.go | 36 +- store/cachekv/search_benchmark_test.go | 3 +- tests/integration/genutil/gentx_test.go | 1 - tests/mocks/types_router.go | 51 - types/module/module.go | 3 - types/router.go | 32 - types/router_test.go | 58 - x/bank/bench_test.go | 7 +- x/bank/simulation/operations_test.go | 1 - x/distribution/keeper/delegation_test.go | 1 - x/genutil/gentx_test.go | 12 +- x/gov/migrations/v3/store_test.go | 2 +- 29 files changed, 3790 insertions(+), 2443 deletions(-) create mode 100644 baseapp/deliver_tx_test.go delete mode 100644 baseapp/router.go delete mode 100644 baseapp/router_test.go create mode 100644 baseapp/testutil/buf.gen.yaml create mode 100644 baseapp/testutil/buf.lock create mode 100644 baseapp/testutil/buf.yaml create mode 100644 baseapp/testutil/messages.go create mode 100644 baseapp/testutil/messages.pb.go create mode 100644 baseapp/testutil/messages.proto delete mode 100644 types/router_test.go diff --git a/baseapp/abci_test.go b/baseapp/abci_test.go index 8b331ca03811..fd383128c7d5 100644 --- a/baseapp/abci_test.go +++ b/baseapp/abci_test.go @@ -1,10 +1,13 @@ package baseapp import ( + "encoding/json" + "os" "testing" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" dbm "github.com/tendermint/tm-db" @@ -12,8 +15,13 @@ import ( "github.com/cosmos/cosmos-sdk/snapshots" snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" "github.com/cosmos/cosmos-sdk/testutil" + sdk "github.com/cosmos/cosmos-sdk/types" ) +func defaultLogger() log.Logger { + return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app") +} + func TestGetBlockRentionHeight(t *testing.T) { logger := defaultLogger() db := dbm.NewMemDB() @@ -164,3 +172,40 @@ func TestBaseAppCreateQueryContext(t *testing.T) { }) } } + +type paramStore struct { + db *dbm.MemDB +} + +func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { + bz, err := json.Marshal(value) + if err != nil { + panic(err) + } + + ps.db.Set(key, bz) +} + +func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { + ok, err := ps.db.Has(key) + if err != nil { + panic(err) + } + + return ok +} + +func (ps *paramStore) Get(_ 
sdk.Context, key []byte, ptr interface{}) { + bz, err := ps.db.Get(key) + if err != nil { + panic(err) + } + + if len(bz) == 0 { + return + } + + if err := json.Unmarshal(bz, ptr); err != nil { + panic(err) + } +} diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 3cd69facbd00..e199a67c2fbb 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -18,7 +18,6 @@ import ( storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" ) const ( @@ -49,7 +48,6 @@ type BaseApp struct { //nolint: maligned db dbm.DB // common DB backend cms sdk.CommitMultiStore // Main (uncached) state storeLoader StoreLoader // function to handle store loading, may be overridden with SetStoreLoader() - router sdk.Router // handle any kind of legacy message queryRouter sdk.QueryRouter // router for redirecting query calls grpcQueryRouter *GRPCQueryRouter // router for redirecting gRPC query calls msgServiceRouter *MsgServiceRouter // router for redirecting Msg service messages @@ -149,7 +147,6 @@ func NewBaseApp( db: db, cms: store.NewCommitMultiStore(db), storeLoader: DefaultStoreLoader, - router: NewRouter(), queryRouter: NewQueryRouter(), grpcQueryRouter: NewGRPCQueryRouter(), msgServiceRouter: NewMsgServiceRouter(), @@ -367,17 +364,6 @@ func (app *BaseApp) setIndexEvents(ie []string) { } } -// Router returns the legacy router of the BaseApp. -func (app *BaseApp) Router() sdk.Router { - if app.sealed { - // We cannot return a Router when the app is sealed because we can't have - // any routes modified which would cause unexpected routing behavior. - panic("Router() on sealed BaseApp") - } - - return app.router -} - // QueryRouter returns the QueryRouter of a BaseApp. func (app *BaseApp) QueryRouter() sdk.QueryRouter { return app.queryRouter } @@ -749,20 +735,6 @@ func (app *BaseApp) runMsgs(ctx sdk.Context, msgs []sdk.Msg, mode runTxMode) (*s // ADR 031 request type routing msgResult, err = handler(ctx, msg) eventMsgName = sdk.MsgTypeURL(msg) - } else if legacyMsg, ok := msg.(legacytx.LegacyMsg); ok { - // legacy sdk.Msg routing - // Assuming that the app developer has migrated all their Msgs to - // proto messages and has registered all `Msg services`, then this - // path should never be called, because all those Msgs should be - // registered within the `msgServiceRouter` already. 
- msgRoute := legacyMsg.Route() - eventMsgName = legacyMsg.Type() - handler := app.router.Route(ctx, msgRoute) - if handler == nil { - return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized message route: %s; message index: %d", msgRoute, i) - } - - msgResult, err = handler(ctx, msg) } else { return nil, sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "can't route message %+v", msg) } diff --git a/baseapp/baseapp_test.go b/baseapp/baseapp_test.go index 28cdbb39c0ca..e60394d5e2df 100644 --- a/baseapp/baseapp_test.go +++ b/baseapp/baseapp_test.go @@ -1,399 +1,46 @@ package baseapp import ( - "bytes" - "encoding/binary" - "encoding/json" - "fmt" - "math/rand" - "os" - "strings" - "sync" "testing" - "time" - "github.com/gogo/protobuf/jsonpb" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" dbm "github.com/tendermint/tm-db" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" - "github.com/cosmos/cosmos-sdk/snapshots" - snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" - "github.com/cosmos/cosmos-sdk/store/rootmulti" storetypes "github.com/cosmos/cosmos-sdk/store/types" - "github.com/cosmos/cosmos-sdk/testutil" - "github.com/cosmos/cosmos-sdk/testutil/testdata" sdk "github.com/cosmos/cosmos-sdk/types" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" - "github.com/cosmos/cosmos-sdk/x/auth/migrations/legacytx" ) var ( capKey1 = sdk.NewKVStoreKey("key1") capKey2 = sdk.NewKVStoreKey("key2") - - // testTxPriority is the CheckTx priority that we set in the test - // antehandler. - testTxPriority = int64(42) ) -type paramStore struct { - db *dbm.MemDB -} - -type setupConfig struct { - blocks uint64 - blockTxs int - snapshotInterval uint64 - snapshotKeepRecent uint32 - pruningOpts pruningtypes.PruningOptions -} - -func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { - bz, err := json.Marshal(value) - if err != nil { - panic(err) - } - - ps.db.Set(key, bz) -} - -func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { - ok, err := ps.db.Has(key) - if err != nil { - panic(err) - } - - return ok -} - -func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { - bz, err := ps.db.Get(key) - if err != nil { - panic(err) - } - - if len(bz) == 0 { - return - } - - if err := json.Unmarshal(bz, ptr); err != nil { - panic(err) - } -} - -func defaultLogger() log.Logger { - return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app") -} - -func newBaseApp(name string, options ...func(*BaseApp)) *BaseApp { - logger := defaultLogger() - db := dbm.NewMemDB() - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - return NewBaseApp(name, logger, db, testTxDecoder(codec), options...) 
-} - -func registerTestCodec(cdc *codec.LegacyAmino) { - // register Tx, Msg - sdk.RegisterLegacyAminoCodec(cdc) - - // register test types - cdc.RegisterConcrete(&txTest{}, "cosmos-sdk/baseapp/txTest", nil) - cdc.RegisterConcrete(&msgCounter{}, "cosmos-sdk/baseapp/msgCounter", nil) - cdc.RegisterConcrete(&msgCounter2{}, "cosmos-sdk/baseapp/msgCounter2", nil) - cdc.RegisterConcrete(&msgKeyValue{}, "cosmos-sdk/baseapp/msgKeyValue", nil) - cdc.RegisterConcrete(&msgNoRoute{}, "cosmos-sdk/baseapp/msgNoRoute", nil) -} - -// aminoTxEncoder creates a amino TxEncoder for testing purposes. -func aminoTxEncoder() sdk.TxEncoder { - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - return legacytx.StdTxConfig{Cdc: cdc}.TxEncoder() -} - -// simple one store baseapp -func setupBaseApp(t *testing.T, options ...func(*BaseApp)) *BaseApp { - app := newBaseApp(t.Name(), options...) - require.Equal(t, t.Name(), app.Name()) - - app.MountStores(capKey1, capKey2) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - - // stores are mounted - err := app.LoadLatestVersion() - require.Nil(t, err) - return app -} - -// simple one store baseapp with data and snapshots. Each tx is 1 MB in size (uncompressed). -func setupBaseAppWithSnapshots(t *testing.T, config *setupConfig) (*BaseApp, error) { - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - routerOpt := func(bapp *BaseApp) { - bapp.Router().AddRoute(sdk.NewRoute(routeMsgKeyValue, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - kv := msg.(*msgKeyValue) - bapp.cms.GetCommitKVStore(capKey2).Set(kv.Key, kv.Value) - return &sdk.Result{}, nil - })) - } - - snapshotTimeout := 1 * time.Minute - snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t)) - require.NoError(t, err) - - app := setupBaseApp(t, routerOpt, SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(config.snapshotInterval, uint32(config.snapshotKeepRecent))), SetPruning(config.pruningOpts)) - - app.InitChain(abci.RequestInitChain{}) - - r := rand.New(rand.NewSource(3920758213583)) - keyCounter := 0 - for height := int64(1); height <= int64(config.blocks); height++ { - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: height}}) - for txNum := 0; txNum < config.blockTxs; txNum++ { - tx := txTest{Msgs: []sdk.Msg{}} - for msgNum := 0; msgNum < 100; msgNum++ { - key := []byte(fmt.Sprintf("%v", keyCounter)) - value := make([]byte, 10000) - _, err := r.Read(value) - require.NoError(t, err) - tx.Msgs = append(tx.Msgs, msgKeyValue{Key: key, Value: value}) - keyCounter++ - } - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - resp := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, resp.IsOK(), "%v", resp.String()) - } - app.EndBlock(abci.RequestEndBlock{Height: height}) - app.Commit() - - // Wait for snapshot to be taken, since it happens asynchronously. 
- if config.snapshotInterval > 0 && uint64(height)%config.snapshotInterval == 0 { - start := time.Now() - for { - if time.Since(start) > snapshotTimeout { - t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) - } - snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) - require.NoError(t, err) - if snapshot != nil { - break - } - time.Sleep(100 * time.Millisecond) - } - } - } - - return app, nil +func TestSetMinGasPrices(t *testing.T) { + minGasPrices := sdk.DecCoins{sdk.NewInt64DecCoin("stake", 5000)} + app := setupBaseApp(t, SetMinGasPrices(minGasPrices.String())) + require.Equal(t, minGasPrices, app.minGasPrices) } -func TestMountStores(t *testing.T) { +func TestGetMaximumBlockGas(t *testing.T) { app := setupBaseApp(t) + app.InitChain(abci.RequestInitChain{}) + ctx := app.NewContext(true, tmproto.Header{}) - // check both stores - store1 := app.cms.GetCommitKVStore(capKey1) - require.NotNil(t, store1) - store2 := app.cms.GetCommitKVStore(capKey2) - require.NotNil(t, store2) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. -func TestLoadVersion(t *testing.T) { - logger := defaultLogger() - pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - // make a cap key and mount the store - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - - emptyCommitID := storetypes.CommitID{} - - // fresh store has zero/empty last commit - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, int64(0), lastHeight) - require.Equal(t, emptyCommitID, lastID) - - // execute a block, collect commit ID - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := storetypes.CommitID{Version: 1, Hash: res.Data} - - // execute a block, collect commit ID - header = tmproto.Header{Height: 2} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res = app.Commit() - commitID2 := storetypes.CommitID{Version: 2, Hash: res.Data} - - // reload with LoadLatestVersion - app = NewBaseApp(name, logger, db, nil, pruningOpt) - app.MountStores() - err = app.LoadLatestVersion() - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(2), commitID2) - - // reload with LoadVersion, see if you can commit the same block and get - // the same result - app = NewBaseApp(name, logger, db, nil, pruningOpt) - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - testLoadVersionHelper(t, app, int64(2), commitID2) -} - -func useDefaultLoader(app *BaseApp) { - app.SetStoreLoader(DefaultStoreLoader) -} - -func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, int64(0), rs.LastCommitID().Version) - - // write some data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) - require.NotNil(t, kv) - kv.Set(k, v) - commitID := rs.Commit() - require.Equal(t, int64(1), commitID.Version) -} - -func checkStore(t *testing.T, db dbm.DB, ver 
int64, storeKey string, k, v []byte) { - rs := rootmulti.NewStore(db, log.NewNopLogger()) - rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) - key := sdk.NewKVStoreKey(storeKey) - rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) - err := rs.LoadLatestVersion() - require.Nil(t, err) - require.Equal(t, ver, rs.LastCommitID().Version) - - // query data in substore - kv, _ := rs.GetStore(key).(storetypes.KVStore) - require.NotNil(t, kv) - require.Equal(t, v, kv.Get(k)) -} - -// Test that we can make commits and then reload old versions. -// Test that LoadLatestVersion actually does. -func TestSetLoader(t *testing.T) { - cases := map[string]struct { - setLoader func(*BaseApp) - origStoreKey string - loadStoreKey string - }{ - "don't set loader": { - origStoreKey: "foo", - loadStoreKey: "foo", - }, - "default loader": { - setLoader: useDefaultLoader, - origStoreKey: "foo", - loadStoreKey: "foo", - }, - } - - k := []byte("key") - v := []byte("value") - - for name, tc := range cases { - tc := tc - t.Run(name, func(t *testing.T) { - // prepare a db with some data - db := dbm.NewMemDB() - initStore(t, db, tc.origStoreKey, k, v) - - // load the app with the existing db - opts := []func(*BaseApp){SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))} - if tc.setLoader != nil { - opts = append(opts, tc.setLoader) - } - app := NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) - app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) - err := app.LoadLatestVersion() - require.Nil(t, err) - - // "execute" one block - app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) - res := app.Commit() - require.NotNil(t, res.Data) - - // check db is properly updated - checkStore(t, db, 2, tc.loadStoreKey, k, v) - checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) - }) - } -} - -func TestVersionSetterGetter(t *testing.T) { - logger := defaultLogger() - pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - require.Equal(t, "", app.Version()) - res := app.Query(abci.RequestQuery{Path: "app/version"}) - require.True(t, res.IsOK()) - require.Equal(t, "", string(res.Value)) - - versionString := "1.0.0" - app.SetVersion(versionString) - require.Equal(t, versionString, app.Version()) - res = app.Query(abci.RequestQuery{Path: "app/version"}) - require.True(t, res.IsOK()) - require.Equal(t, versionString, string(res.Value)) -} - -func TestLoadVersionInvalid(t *testing.T) { - logger := log.NewNopLogger() - pruningOpt := SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) - db := dbm.NewMemDB() - name := t.Name() - app := NewBaseApp(name, logger, db, nil, pruningOpt) - - err := app.LoadLatestVersion() - require.Nil(t, err) - - // require error when loading an invalid version - err = app.LoadVersion(-1) - require.Error(t, err) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - res := app.Commit() - commitID1 := storetypes.CommitID{Version: 1, Hash: res.Data} + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 0}}) + require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - // create a new app with the stores mounted under the same cap key - app = NewBaseApp(name, logger, db, nil, pruningOpt) + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -1}}) + require.Equal(t, uint64(0), 
app.getMaximumBlockGas(ctx)) - // require we can load the latest version - err = app.LoadVersion(1) - require.Nil(t, err) - testLoadVersionHelper(t, app, int64(1), commitID1) + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 5000000}}) + require.Equal(t, uint64(5000000), app.getMaximumBlockGas(ctx)) - // require error when loading an invalid version - err = app.LoadVersion(2) - require.Error(t, err) + app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -5000000}}) + require.Panics(t, func() { app.getMaximumBlockGas(ctx) }) } func TestLoadVersionPruning(t *testing.T) { @@ -448,1766 +95,25 @@ func TestLoadVersionPruning(t *testing.T) { testLoadVersionHelper(t, app, int64(7), lastCommitID) } -func testLoadVersionHelper(t *testing.T, app *BaseApp, expectedHeight int64, expectedID storetypes.CommitID) { - lastHeight := app.LastBlockHeight() - lastID := app.LastCommitID() - require.Equal(t, expectedHeight, lastHeight) - require.Equal(t, expectedID, lastID) -} - -func TestOptionFunction(t *testing.T) { +// simple one store baseapp +func setupBaseApp(t *testing.T, options ...func(*BaseApp)) *BaseApp { logger := defaultLogger() db := dbm.NewMemDB() - bap := NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) - require.Equal(t, bap.name, "new name", "BaseApp should have had name changed via option function") -} - -func testChangeNameHelper(name string) func(*BaseApp) { - return func(bap *BaseApp) { - bap.name = name - } -} - -// Test that txs can be unmarshalled and read and that -// correct error codes are returned when not -func TestTxDecoder(t *testing.T) { - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - app := newBaseApp(t.Name()) - tx := newTxCounter(1, 0) - txBytes := codec.MustMarshal(tx) - - dTx, err := app.txDecoder(txBytes) - require.NoError(t, err) - - cTx := dTx.(txTest) - require.Equal(t, tx.Counter, cTx.Counter) -} - -// Test that Info returns the latest committed state. 
-func TestInfo(t *testing.T) { - app := newBaseApp(t.Name()) - - // ----- test an empty response ------- - reqInfo := abci.RequestInfo{} - res := app.Info(reqInfo) - - // should be empty - assert.Equal(t, "", res.Version) - assert.Equal(t, t.Name(), res.GetData()) - assert.Equal(t, int64(0), res.LastBlockHeight) - require.Equal(t, []uint8(nil), res.LastBlockAppHash) - require.Equal(t, app.AppVersion(), res.AppVersion) - // ----- test a proper response ------- - // TODO -} - -func TestBaseAppOptionSeal(t *testing.T) { - app := setupBaseApp(t) - - require.Panics(t, func() { - app.SetName("") - }) - require.Panics(t, func() { - app.SetVersion("") - }) - require.Panics(t, func() { - app.SetDB(nil) - }) - require.Panics(t, func() { - app.SetCMS(nil) - }) - require.Panics(t, func() { - app.SetInitChainer(nil) - }) - require.Panics(t, func() { - app.SetBeginBlocker(nil) - }) - require.Panics(t, func() { - app.SetEndBlocker(nil) - }) - require.Panics(t, func() { - app.SetAnteHandler(nil) - }) - require.Panics(t, func() { - app.SetAddrPeerFilter(nil) - }) - require.Panics(t, func() { - app.SetIDPeerFilter(nil) - }) - require.Panics(t, func() { - app.SetFauxMerkleMode() - }) - require.Panics(t, func() { - app.SetRouter(NewRouter()) - }) -} - -func TestSetMinGasPrices(t *testing.T) { - minGasPrices := sdk.DecCoins{sdk.NewInt64DecCoin("stake", 5000)} - app := newBaseApp(t.Name(), SetMinGasPrices(minGasPrices.String())) - require.Equal(t, minGasPrices, app.minGasPrices) -} - -func TestInitChainer(t *testing.T) { - name := t.Name() - // keep the db and logger ourselves so - // we can reload the same app later - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - capKey := sdk.NewKVStoreKey("main") - capKey2 := sdk.NewKVStoreKey("key2") - app.MountStores(capKey, capKey2) - - // set a value in the store on init chain - key, value := []byte("hello"), []byte("goodbye") - var initChainer sdk.InitChainer = func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { - store := ctx.KVStore(capKey) - store.Set(key, value) - return abci.ResponseInitChain{} - } - - query := abci.RequestQuery{ - Path: "/store/main/key", - Data: key, - } - - // initChainer is nil - nothing happens - app.InitChain(abci.RequestInitChain{}) - res := app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // set initChainer and try again - should see the value - app.SetInitChainer(initChainer) - - // stores are mounted and private members are set - sealing baseapp - err := app.LoadLatestVersion() // needed to make stores non-nil - require.Nil(t, err) - require.Equal(t, int64(0), app.LastBlockHeight()) - - initChainRes := app.InitChain(abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty - - // The AppHash returned by a new chain is the sha256 hash of "". 
- // $ echo -n '' | sha256sum - // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 - require.Equal( - t, - []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, - initChainRes.AppHash, - ) - - // assert that chainID is set correctly in InitChain - chainID := app.deliverState.ctx.ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") - - chainID = app.checkState.ctx.ChainID() - require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") + app := NewBaseApp(t.Name(), logger, db, nil, options...) + require.Equal(t, t.Name(), app.Name()) - app.Commit() - res = app.Query(query) - require.Equal(t, int64(1), app.LastBlockHeight()) - require.Equal(t, value, res.Value) + app.MountStores(capKey1, capKey2) + app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - // reload app - app = NewBaseApp(name, logger, db, nil) - app.SetInitChainer(initChainer) - app.MountStores(capKey, capKey2) - err = app.LoadLatestVersion() // needed to make stores non-nil + // stores are mounted + err := app.LoadLatestVersion() require.Nil(t, err) - require.Equal(t, int64(1), app.LastBlockHeight()) - - // ensure we can still query after reloading - res = app.Query(query) - require.Equal(t, value, res.Value) - - // commit and ensure we can still query - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - - res = app.Query(query) - require.Equal(t, value, res.Value) -} - -func TestInitChain_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - - app.InitChain( - abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - app.Commit() - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -func TestBeginBlock_WithInitialHeight(t *testing.T) { - name := t.Name() - db := dbm.NewMemDB() - logger := defaultLogger() - app := NewBaseApp(name, logger, db, nil) - - app.InitChain( - abci.RequestInitChain{ - InitialHeight: 3, - }, - ) - - require.PanicsWithError(t, "invalid height: 4; expected: 3", func() { - app.BeginBlock(abci.RequestBeginBlock{ - Header: tmproto.Header{ - Height: 4, - }, - }) - }) - - app.BeginBlock(abci.RequestBeginBlock{ - Header: tmproto.Header{ - Height: 3, - }, - }) - app.Commit() - - require.Equal(t, int64(3), app.LastBlockHeight()) -} - -// Simple tx with a list of Msgs. -type txTest struct { - Msgs []sdk.Msg - Counter int64 - FailOnAnte bool -} - -func (tx *txTest) setFailOnAnte(fail bool) { - tx.FailOnAnte = fail -} - -func (tx *txTest) setFailOnHandler(fail bool) { - for i, msg := range tx.Msgs { - tx.Msgs[i] = msgCounter{msg.(msgCounter).Counter, fail} - } -} - -// Implements Tx -func (tx txTest) GetMsgs() []sdk.Msg { return tx.Msgs } -func (tx txTest) ValidateBasic() error { return nil } - -const ( - routeMsgCounter = "msgCounter" - routeMsgCounter2 = "msgCounter2" - routeMsgKeyValue = "msgKeyValue" -) - -// ValidateBasic() fails on negative counters. 
-// Otherwise it's up to the handlers -type msgCounter struct { - Counter int64 - FailOnHandler bool -} - -// dummy implementation of proto.Message -func (msg msgCounter) Reset() {} -func (msg msgCounter) String() string { return "TODO" } -func (msg msgCounter) ProtoMessage() {} - -// Implements Msg -func (msg msgCounter) Route() string { return routeMsgCounter } -func (msg msgCounter) Type() string { return "counter1" } -func (msg msgCounter) GetSignBytes() []byte { return nil } -func (msg msgCounter) GetSigners() []sdk.AccAddress { return nil } -func (msg msgCounter) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -func newTxCounter(counter int64, msgCounters ...int64) *txTest { - msgs := make([]sdk.Msg, 0, len(msgCounters)) - for _, c := range msgCounters { - msgs = append(msgs, msgCounter{c, false}) - } - - return &txTest{msgs, counter, false} -} - -// a msg we dont know how to route -type msgNoRoute struct { - msgCounter -} - -func (tx msgNoRoute) Route() string { return "noroute" } - -// a msg we dont know how to decode -type msgNoDecode struct { - msgCounter -} - -func (tx msgNoDecode) Route() string { return routeMsgCounter } - -// Another counter msg. Duplicate of msgCounter -type msgCounter2 struct { - Counter int64 -} - -// dummy implementation of proto.Message -func (msg msgCounter2) Reset() {} -func (msg msgCounter2) String() string { return "TODO" } -func (msg msgCounter2) ProtoMessage() {} - -// Implements Msg -func (msg msgCounter2) Route() string { return routeMsgCounter2 } -func (msg msgCounter2) Type() string { return "counter2" } -func (msg msgCounter2) GetSignBytes() []byte { return nil } -func (msg msgCounter2) GetSigners() []sdk.AccAddress { return nil } -func (msg msgCounter2) ValidateBasic() error { - if msg.Counter >= 0 { - return nil - } - return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") -} - -// A msg that sets a key/value pair. 
-type msgKeyValue struct { - Key []byte - Value []byte -} - -func (msg msgKeyValue) Reset() {} -func (msg msgKeyValue) String() string { return "TODO" } -func (msg msgKeyValue) ProtoMessage() {} -func (msg msgKeyValue) Route() string { return routeMsgKeyValue } -func (msg msgKeyValue) Type() string { return "keyValue" } -func (msg msgKeyValue) GetSignBytes() []byte { return nil } -func (msg msgKeyValue) GetSigners() []sdk.AccAddress { return nil } -func (msg msgKeyValue) ValidateBasic() error { - if msg.Key == nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") - } - if msg.Value == nil { - return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") - } - return nil -} - -// amino decode -func testTxDecoder(cdc *codec.LegacyAmino) sdk.TxDecoder { - return func(txBytes []byte) (sdk.Tx, error) { - var tx txTest - if len(txBytes) == 0 { - return nil, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "tx bytes are empty") - } - - err := cdc.Unmarshal(txBytes, &tx) - if err != nil { - return nil, sdkerrors.ErrTxDecode - } - - return tx, nil - } + return app } -func anteHandlerTxTest(t *testing.T, capKey storetypes.StoreKey, storeKey []byte) sdk.AnteHandler { - return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { - store := ctx.KVStore(capKey) - txTest := tx.(txTest) - - if txTest.FailOnAnte { - return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } - - _, err := incrementingCounter(t, store, storeKey, txTest.Counter) - if err != nil { - return ctx, err - } - - ctx.EventManager().EmitEvents( - counterEvent("ante_handler", txTest.Counter), - ) - - ctx = ctx.WithPriority(testTxPriority) - - return ctx, nil - } -} - -func counterEvent(evType string, msgCount int64) sdk.Events { - return sdk.Events{ - sdk.NewEvent( - evType, - sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), - ), - } -} - -func handlerMsgCounter(t *testing.T, capKey storetypes.StoreKey, deliverKey []byte) sdk.Handler { - return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx = ctx.WithEventManager(sdk.NewEventManager()) - store := ctx.KVStore(capKey) - var msgCount int64 - - switch m := msg.(type) { - case *msgCounter: - if m.FailOnHandler { - return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") - } - - msgCount = m.Counter - case *msgCounter2: - msgCount = m.Counter - } - - ctx.EventManager().EmitEvents( - counterEvent(sdk.EventTypeMessage, msgCount), - ) - - res, err := incrementingCounter(t, store, deliverKey, msgCount) - if err != nil { - return nil, err - } - - res.Events = ctx.EventManager().Events().ToABCIEvents() - return res, nil - } -} - -func getIntFromStore(store sdk.KVStore, key []byte) int64 { - bz := store.Get(key) - if len(bz) == 0 { - return 0 - } - i, err := binary.ReadVarint(bytes.NewBuffer(bz)) - if err != nil { - panic(err) - } - return i -} - -func setIntOnStore(store sdk.KVStore, key []byte, i int64) { - bz := make([]byte, 8) - n := binary.PutVarint(bz, i) - store.Set(key, bz[:n]) -} - -// check counter matches what's in store. -// increment and store -func incrementingCounter(t *testing.T, store sdk.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { - storedCounter := getIntFromStore(store, counterKey) - require.Equal(t, storedCounter, counter) - setIntOnStore(store, counterKey, counter+1) - return &sdk.Result{}, nil -} - -//--------------------------------------------------------------------- -// Tx processing - CheckTx, DeliverTx, SimulateTx. 
-// These tests use the serialized tx as input, while most others will use the -// Check(), Deliver(), Simulate() methods directly. -// Ensure that Check/Deliver/Simulate work as expected with the store. - -// Test that successive CheckTx can see each others' effects -// on the store within a block, and that the CheckTx state -// gets reset to the latest committed state during Commit -func TestCheckTx(t *testing.T) { - // This ante handler reads the key and checks that the value matches the current counter. - // This ensures changes to the kvstore persist across successive CheckTx. - counterKey := []byte("counter-key") - - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) } - routerOpt := func(bapp *BaseApp) { - // TODO: can remove this once CheckTx doesnt process msgs. - bapp.Router().AddRoute(sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - })) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - nTxs := int64(5) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - for i := int64(0); i < nTxs; i++ { - tx := newTxCounter(i, 0) // no messages - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - r := app.CheckTx(abci.RequestCheckTx{Tx: txBytes}) - require.Equal(t, testTxPriority, r.Priority) - require.Empty(t, r.GetEvents()) - require.True(t, r.IsOK(), fmt.Sprintf("%v", r)) - } - - checkStateStore := app.checkState.ctx.KVStore(capKey1) - storedCounter := getIntFromStore(checkStateStore, counterKey) - - // Ensure AnteHandler ran - require.Equal(t, nTxs, storedCounter) - - // If a block is committed, CheckTx state should be reset. - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header, Hash: []byte("hash")}) - - require.NotNil(t, app.checkState.ctx.BlockGasMeter(), "block gas meter should have been set to checkState") - require.NotEmpty(t, app.checkState.ctx.HeaderHash()) - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - - checkStateStore = app.checkState.ctx.KVStore(capKey1) - storedBytes := checkStateStore.Get(counterKey) - require.Nil(t, storedBytes) -} - -// Test that successive DeliverTx can see each others' effects -// on the store, both within and across blocks. 
-func TestDeliverTx(t *testing.T) { - // test increments in the ante - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // test increments in the handler - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - header := tmproto.Header{Height: int64(blockN) + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(counter, counter) - - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - events := res.GetEvents() - require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") - require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") - } - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -// Number of messages doesn't matter to CheckTx. -func TestMultiMsgCheckTx(t *testing.T) { - // TODO: ensure we get the same results - // with one message or many -} - -// One call to DeliverTx should process all the messages, in order. 
-func TestMultiMsgDeliverTx(t *testing.T) { - // increment the tx counter - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // increment the msg counter - deliverKey := []byte("deliver-key") - deliverKey2 := []byte("deliver-key2") - routerOpt := func(bapp *BaseApp) { - r1 := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - r2 := sdk.NewRoute(routeMsgCounter2, handlerMsgCounter(t, capKey1, deliverKey2)) - bapp.Router().AddRoute(r1) - bapp.Router().AddRoute(r2) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - // run a multi-msg tx - // with all msgs the same route - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - tx := newTxCounter(0, 0, 1, 2) - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - store := app.deliverState.ctx.KVStore(capKey1) - - // tx counter only incremented once - txCounter := getIntFromStore(store, anteKey) - require.Equal(t, int64(1), txCounter) - - // msg counter incremented three times - msgCounter := getIntFromStore(store, deliverKey) - require.Equal(t, int64(3), msgCounter) - - // replace the second message with a msgCounter2 - - tx = newTxCounter(1, 3) - tx.Msgs = append(tx.Msgs, msgCounter2{0}) - tx.Msgs = append(tx.Msgs, msgCounter2{1}) - txBytes, err = codec.Marshal(tx) - require.NoError(t, err) - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - store = app.deliverState.ctx.KVStore(capKey1) - - // tx counter only incremented once - txCounter = getIntFromStore(store, anteKey) - require.Equal(t, int64(2), txCounter) - - // original counter increments by one - // new counter increments by two - msgCounter = getIntFromStore(store, deliverKey) - require.Equal(t, int64(4), msgCounter) - msgCounter2 := getIntFromStore(store, deliverKey2) - require.Equal(t, int64(2), msgCounter2) -} - -// Interleave calls to Check and Deliver and ensure -// that there is no cross-talk. Check sees results of the previous Check calls -// and Deliver sees that of the previous Deliver calls, but they don't see eachother. -func TestConcurrentCheckDeliver(t *testing.T) { - // TODO -} - -// Simulate a transaction that uses gas to compute the gas. -// Simulate() and Query("/app/simulate", txBytes) should give -// the same results. 
-func TestSimulateTx(t *testing.T) { - gasConsumed := uint64(5) - - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasConsumed)) - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - ctx.GasMeter().ConsumeGas(gasConsumed, "test") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - nBlocks := 3 - for blockN := 0; blockN < nBlocks; blockN++ { - count := int64(blockN + 1) - header := tmproto.Header{Height: count} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - tx := newTxCounter(count, count) - txBytes, err := cdc.Marshal(tx) - require.Nil(t, err) - - // simulate a message, check gas reported - gInfo, result, err := app.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate again, same result - gInfo, result, err = app.Simulate(txBytes) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, gasConsumed, gInfo.GasUsed) - - // simulate by calling Query with encoded tx - query := abci.RequestQuery{ - Path: "/app/simulate", - Data: txBytes, - } - queryResult := app.Query(query) - require.True(t, queryResult.IsOK(), queryResult.Log) - - var simRes sdk.SimulationResponse - require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) - - require.Equal(t, gInfo, simRes.GasInfo) - require.Equal(t, result.Log, simRes.Result.Log) - require.Equal(t, result.Events, simRes.Result.Events) - require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -func TestRunInvalidTransaction(t *testing.T) { - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - return - }) - } - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // transaction with no messages - { - emptyTx := &txTest{} - _, result, err := app.SimDeliver(aminoTxEncoder(), emptyTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), code, err) - } - - // transaction where ValidateBasic fails - { - testCases := []struct { - tx *txTest - fail bool - }{ - {newTxCounter(0, 0), false}, - {newTxCounter(-1, 0), false}, - {newTxCounter(100, 100), false}, - {newTxCounter(100, 5, 4, 3, 2, 1), false}, - - {newTxCounter(0, -1), true}, - {newTxCounter(0, 1, -2), true}, - {newTxCounter(0, 1, 2, -10, 5), true}, - } - - for _, testCase := range testCases { - tx := testCase.tx - _, result, err := app.SimDeliver(aminoTxEncoder(), tx) - - if testCase.fail { - require.Error(t, err) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, 
sdkerrors.ErrInvalidSequence.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) - } else { - require.NotNil(t, result) - } - } - } - - // transaction with no known route - { - unknownRouteTx := txTest{[]sdk.Msg{msgNoRoute{}}, 0, false} - _, result, err := app.SimDeliver(aminoTxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - - unknownRouteTx = txTest{[]sdk.Msg{msgCounter{}, msgNoRoute{}}, 0, false} - _, result, err = app.SimDeliver(aminoTxEncoder(), unknownRouteTx) - require.Error(t, err) - require.Nil(t, result) - - space, code, _ = sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) - } - - // Transaction with an unregistered message - { - tx := newTxCounter(0, 0) - tx.Msgs = append(tx.Msgs, msgNoDecode{}) - - // new codec so we can encode the tx, but we shouldn't be able to decode - newCdc := codec.NewLegacyAmino() - registerTestCodec(newCdc) - newCdc.RegisterConcrete(&msgNoDecode{}, "cosmos-sdk/baseapp/msgNoDecode", nil) - - txBytes, err := newCdc.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), res.Code) - require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) - } -} - -// Test that transactions exceeding gas limits fail -func TestTxGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - // AnteHandlers must have their own defer/recover in order for the BaseApp - // to know how much gas was used! This is because the GasMeter is created in - // the AnteHandler, but if it panics the context won't be set properly in - // runTx's recover call. 
- defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return newCtx, nil - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - testCases := []struct { - tx *txTest - gasUsed uint64 - fail bool - }{ - {newTxCounter(0, 0), 0, false}, - {newTxCounter(1, 1), 2, false}, - {newTxCounter(9, 1), 10, false}, - {newTxCounter(1, 9), 10, false}, - {newTxCounter(10, 0), 10, false}, - {newTxCounter(0, 10), 10, false}, - {newTxCounter(0, 8, 2), 10, false}, - {newTxCounter(0, 5, 1, 1, 1, 1, 1), 10, false}, - {newTxCounter(0, 5, 1, 1, 1, 1), 9, false}, - - {newTxCounter(9, 2), 11, true}, - {newTxCounter(2, 9), 11, true}, - {newTxCounter(9, 1, 1), 11, true}, - {newTxCounter(1, 8, 1, 1), 11, true}, - {newTxCounter(11, 0), 11, true}, - {newTxCounter(0, 11), 11, true}, - {newTxCounter(0, 5, 11), 16, true}, - } - - for i, tc := range testCases { - tx := tc.tx - gInfo, result, err := app.SimDeliver(aminoTxEncoder(), tx) - - // check gas used and wanted - require.Equal(t, tc.gasUsed, gInfo.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, gInfo, result, err)) - - // check for out of gas - if !tc.fail { - require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) - } else { - require.Error(t, err) - require.Nil(t, result) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - } - } -} - -// Test that transactions exceeding gas limits fail -func TestMaxBlockGasLimits(t *testing.T) { - gasGranted := uint64(10) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) - default: - panic(r) - } - } - }() - - count := tx.(txTest).Counter - newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 100, - }, - }, - }) - - testCases := []struct { - tx *txTest - numDelivers int - gasUsedPerDeliver uint64 - fail bool - failAfterDeliver int - }{ - {newTxCounter(0, 0), 0, 0, false, 0}, - {newTxCounter(9, 1), 2, 10, false, 0}, - {newTxCounter(10, 0), 3, 10, false, 0}, - {newTxCounter(10, 0), 
10, 10, false, 0}, - {newTxCounter(2, 7), 11, 9, false, 0}, - {newTxCounter(10, 0), 10, 10, false, 0}, // hit the limit but pass - - {newTxCounter(10, 0), 11, 10, true, 10}, - {newTxCounter(10, 0), 15, 10, true, 10}, - {newTxCounter(9, 0), 12, 9, true, 11}, // fly past the limit - } - - for i, tc := range testCases { - tx := tc.tx - - // reset the block gas - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute the transaction multiple times - for j := 0; j < tc.numDelivers; j++ { - _, result, err := app.SimDeliver(aminoTxEncoder(), tx) - - ctx := app.getState(runTxModeDeliver).ctx - - // check for failed transactions - if tc.fail && (j+1) > tc.failAfterDeliver { - require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err)) - - space, code, _ := sdkerrors.ABCIInfo(err, false) - require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) - require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) - require.True(t, ctx.BlockGasMeter().IsOutOfGas()) - } else { - // check gas used and wanted - blockGasUsed := ctx.BlockGasMeter().GasConsumed() - expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1) - require.Equal( - t, expBlockGasUsed, blockGasUsed, - fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result), - ) - - require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) - require.False(t, ctx.BlockGasMeter().IsPastLimit()) - } - } - } -} - -// Test custom panic handling within app.DeliverTx method -func TestCustomRunTxPanicHandler(t *testing.T) { - const customPanicMsg = "test panic" - anteErr := sdkerrors.Register("fakeModule", 100500, "fakeError") - - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - panic(sdkerrors.Wrap(anteErr, "anteHandler")) - }) - } - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - header := tmproto.Header{Height: 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { - err, ok := recoveryObj.(error) - if !ok { - return nil - } - - if anteErr.Is(err) { - panic(customPanicMsg) - } else { - return nil - } - }) - - // Transaction should panic with custom handler above - { - tx := newTxCounter(0, 0) - - require.PanicsWithValue(t, customPanicMsg, func() { app.SimDeliver(aminoTxEncoder(), tx) }) - } -} - -func TestBaseAppAnteHandler(t *testing.T) { - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) - } - - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - cdc := codec.NewLegacyAmino() - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - registerTestCodec(cdc) - - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - // execute a tx that will fail ante handler execution - // - // NOTE: State should not be mutated here. 
This will be implicitly checked by - // the next txs ante handler execution (anteHandlerTxTest). - tx := newTxCounter(0, 0) - tx.setFailOnAnte(true) - txBytes, err := cdc.Marshal(tx) - require.NoError(t, err) - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.Empty(t, res.Events) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx := app.getState(runTxModeDeliver).ctx - store := ctx.KVStore(capKey1) - require.Equal(t, int64(0), getIntFromStore(store, anteKey)) - - // execute at tx that will pass the ante handler (the checkTx state should - // mutate) but will fail the message handler - tx = newTxCounter(0, 0) - tx.setFailOnHandler(true) - - txBytes, err = cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - // should emit ante event - require.NotEmpty(t, res.Events) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx = app.getState(runTxModeDeliver).ctx - store = ctx.KVStore(capKey1) - require.Equal(t, int64(1), getIntFromStore(store, anteKey)) - require.Equal(t, int64(0), getIntFromStore(store, deliverKey)) - - // execute a successful ante handler and message execution where state is - // implicitly checked by previous tx executions - tx = newTxCounter(1, 0) - - txBytes, err = cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.NotEmpty(t, res.Events) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - - ctx = app.getState(runTxModeDeliver).ctx - store = ctx.KVStore(capKey1) - require.Equal(t, int64(2), getIntFromStore(store, anteKey)) - require.Equal(t, int64(1), getIntFromStore(store, deliverKey)) - - // commit - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() -} - -func TestGasConsumptionBadTx(t *testing.T) { - gasWanted := uint64(5) - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasWanted)) - - defer func() { - if r := recover(); r != nil { - switch rType := r.(type) { - case sdk.ErrorOutOfGas: - log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) - err = sdkerrors.Wrap(sdkerrors.ErrOutOfGas, log) - default: - panic(r) - } - } - }() - - txTest := tx.(txTest) - newCtx.GasMeter().ConsumeGas(uint64(txTest.Counter), "counter-ante") - if txTest.FailOnAnte { - return newCtx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") - } - - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - count := msg.(*msgCounter).Counter - ctx.GasMeter().ConsumeGas(uint64(count), "counter-handler") - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - cdc := codec.NewLegacyAmino() - registerTestCodec(cdc) - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 9, - }, - }, - }) - - app.InitChain(abci.RequestInitChain{}) - - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - tx := newTxCounter(5, 0) - tx.setFailOnAnte(true) - txBytes, err := cdc.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) - - // require next tx to fail due to black gas limit - tx = newTxCounter(5, 0) - txBytes, err = 
cdc.Marshal(tx) - require.NoError(t, err) - - res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) -} - -// Test that we can only query from the latest committed state. -func TestQuery(t *testing.T) { - key, value := []byte("hello"), []byte("goodbye") - anteOpt := func(bapp *BaseApp) { - bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return - }) - } - - routerOpt := func(bapp *BaseApp) { - r := sdk.NewRoute(routeMsgCounter, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - store := ctx.KVStore(capKey1) - store.Set(key, value) - return &sdk.Result{}, nil - }) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - - app.InitChain(abci.RequestInitChain{}) - - // NOTE: "/store/key1" tells us KVStore - // and the final "/key" says to use the data as the - // key in the given KVStore ... - query := abci.RequestQuery{ - Path: "/store/key1/key", - Data: key, - } - tx := newTxCounter(0, 0) - - // query is empty before we do anything - res := app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a CheckTx - _, resTx, err := app.SimCheck(aminoTxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - res = app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query is still empty after a DeliverTx before we commit - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - _, resTx, err = app.SimDeliver(aminoTxEncoder(), tx) - require.NoError(t, err) - require.NotNil(t, resTx) - res = app.Query(query) - require.Equal(t, 0, len(res.Value)) - - // query returns correct value after Commit - app.Commit() - res = app.Query(query) - require.Equal(t, value, res.Value) -} - -func TestGRPCQuery(t *testing.T) { - grpcQueryOpt := func(bapp *BaseApp) { - testdata.RegisterQueryServer( - bapp.GRPCQueryRouter(), - testdata.QueryImpl{}, - ) - } - - app := setupBaseApp(t, grpcQueryOpt) - app.GRPCQueryRouter().SetInterfaceRegistry(codectypes.NewInterfaceRegistry()) - - app.InitChain(abci.RequestInitChain{}) - header := tmproto.Header{Height: app.LastBlockHeight() + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - app.Commit() - - req := testdata.SayHelloRequest{Name: "foo"} - reqBz, err := req.Marshal() - require.NoError(t, err) - - reqQuery := abci.RequestQuery{ - Data: reqBz, - Path: "/testdata.Query/SayHello", - } - - resQuery := app.Query(reqQuery) - - require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) - - var res testdata.SayHelloResponse - err = res.Unmarshal(resQuery.Value) - require.NoError(t, err) - require.Equal(t, "Hello foo!", res.Greeting) -} - -// Test p2p filter queries -func TestP2PQuery(t *testing.T) { - addrPeerFilterOpt := func(bapp *BaseApp) { - bapp.SetAddrPeerFilter(func(addrport string) abci.ResponseQuery { - require.Equal(t, "1.1.1.1:8000", addrport) - return abci.ResponseQuery{Code: uint32(3)} - }) - } - - idPeerFilterOpt := func(bapp *BaseApp) { - bapp.SetIDPeerFilter(func(id string) abci.ResponseQuery { - require.Equal(t, "testid", id) - return abci.ResponseQuery{Code: uint32(4)} - }) - } - - app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) - - addrQuery := abci.RequestQuery{ - Path: "/p2p/filter/addr/1.1.1.1:8000", - } - res := app.Query(addrQuery) - require.Equal(t, uint32(3), res.Code) - - idQuery := abci.RequestQuery{ - Path: 
"/p2p/filter/id/testid", - } - res = app.Query(idQuery) - require.Equal(t, uint32(4), res.Code) -} - -func TestGetMaximumBlockGas(t *testing.T) { - app := setupBaseApp(t) - app.InitChain(abci.RequestInitChain{}) - ctx := app.NewContext(true, tmproto.Header{}) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 0}}) - require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -1}}) - require.Equal(t, uint64(0), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: 5000000}}) - require.Equal(t, uint64(5000000), app.getMaximumBlockGas(ctx)) - - app.StoreConsensusParams(ctx, &abci.ConsensusParams{Block: &abci.BlockParams{MaxGas: -5000000}}) - require.Panics(t, func() { app.getMaximumBlockGas(ctx) }) -} - -func TestListSnapshots(t *testing.T) { - setupConfig := &setupConfig{ - blocks: 5, - blockTxs: 4, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - - app, err := setupBaseAppWithSnapshots(t, setupConfig) - require.NoError(t, err) - - resp := app.ListSnapshots(abci.RequestListSnapshots{}) - for _, s := range resp.Snapshots { - assert.NotEmpty(t, s.Hash) - assert.NotEmpty(t, s.Metadata) - s.Hash = nil - s.Metadata = nil - } - assert.Equal(t, abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ - {Height: 4, Format: snapshottypes.CurrentFormat, Chunks: 2}, - {Height: 2, Format: snapshottypes.CurrentFormat, Chunks: 1}, - }}, resp) -} - -func TestSnapshotWithPruning(t *testing.T) { - testcases := map[string]struct { - config *setupConfig - expectedSnapshots []*abci.Snapshot - expectedErr error - }{ - "prune nothing with snapshot": { - config: &setupConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 1, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 20, Format: 2, Chunks: 5}, - }, - }, - "prune everything with snapshot": { - config: &setupConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 1, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 20, Format: 2, Chunks: 5}, - }, - }, - "default pruning with snapshot": { - config: &setupConfig{ - blocks: 20, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 1, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 20, Format: 2, Chunks: 5}, - }, - }, - "custom": { - config: &setupConfig{ - blocks: 25, - blockTxs: 2, - snapshotInterval: 5, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewCustomPruningOptions(12, 12), - }, - expectedSnapshots: []*abci.Snapshot{ - {Height: 25, Format: 2, Chunks: 6}, - {Height: 20, Format: 2, Chunks: 5}, - }, - }, - "no snapshots": { - config: &setupConfig{ - blocks: 10, - blockTxs: 2, - snapshotInterval: 0, // 0 implies disable snapshots - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - }, - expectedSnapshots: []*abci.Snapshot{}, - }, - "keep all snapshots": { - config: &setupConfig{ - blocks: 10, - blockTxs: 2, - snapshotInterval: 3, - snapshotKeepRecent: 0, // 0 implies keep all snapshots - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - }, - expectedSnapshots: []*abci.Snapshot{ - 
{Height: 9, Format: 2, Chunks: 2}, - {Height: 6, Format: 2, Chunks: 2}, - {Height: 3, Format: 2, Chunks: 1}, - }, - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - app, err := setupBaseAppWithSnapshots(t, tc.config) - - if tc.expectedErr != nil { - require.Error(t, err) - require.Equal(t, tc.expectedErr.Error(), err.Error()) - return - } - require.NoError(t, err) - - resp := app.ListSnapshots(abci.RequestListSnapshots{}) - for _, s := range resp.Snapshots { - assert.NotEmpty(t, s.Hash) - assert.NotEmpty(t, s.Metadata) - s.Hash = nil - s.Metadata = nil - } - fmt.Println(resp) - assert.Equal(t, abci.ResponseListSnapshots{Snapshots: tc.expectedSnapshots}, resp) - - // Validate that heights were pruned correctly by querying the state at the last height that should be present relative to latest - // and the first height that should be pruned. - // - // Exceptions: - // * Prune nothing: should be able to query all heights (we only test first and latest) - // * Prune default: should be able to query all heights (we only test first and latest) - // * The reason for default behaving this way is that we only commit 20 heights but default has 100_000 keep-recent - var lastExistingHeight int64 - if tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { - lastExistingHeight = 1 - } else { - // Integer division rounds down so by multiplying back we get the last height at which we pruned - lastExistingHeight = int64((tc.config.blocks/tc.config.pruningOpts.Interval)*tc.config.pruningOpts.Interval - tc.config.pruningOpts.KeepRecent) - } - - // Query 1 - res := app.Query(abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight}) - require.NotNil(t, res, "height: %d", lastExistingHeight) - require.NotNil(t, res.Value, "height: %d", lastExistingHeight) - - // Query 2 - res = app.Query(abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight - 1}) - require.NotNil(t, res, "height: %d", lastExistingHeight-1) - if tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { - // With prune nothing or default, we query height 0 which translates to the latest height. 
- require.NotNil(t, res.Value, "height: %d", lastExistingHeight-1) - } - }) - } -} - -func TestLoadSnapshotChunk(t *testing.T) { - setupConfig := &setupConfig{ - blocks: 2, - blockTxs: 5, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - app, err := setupBaseAppWithSnapshots(t, setupConfig) - require.NoError(t, err) - - testcases := map[string]struct { - height uint64 - format uint32 - chunk uint32 - expectEmpty bool - }{ - "Existing snapshot": {2, snapshottypes.CurrentFormat, 1, false}, - "Missing height": {100, snapshottypes.CurrentFormat, 1, true}, - "Missing format": {2, 3, 1, true}, - "Missing chunk": {2, snapshottypes.CurrentFormat, 9, true}, - "Zero height": {0, snapshottypes.CurrentFormat, 1, true}, - "Zero format": {2, 0, 1, true}, - "Zero chunk": {2, snapshottypes.CurrentFormat, 0, false}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - resp := app.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ - Height: tc.height, - Format: tc.format, - Chunk: tc.chunk, - }) - if tc.expectEmpty { - assert.Equal(t, abci.ResponseLoadSnapshotChunk{}, resp) - return - } - assert.NotEmpty(t, resp.Chunk) - }) - } -} - -func TestOfferSnapshot_Errors(t *testing.T) { - // Set up app before test cases, since it's fairly expensive. - setupConfig := &setupConfig{ - blocks: 0, - blockTxs: 0, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - app, err := setupBaseAppWithSnapshots(t, setupConfig) - require.NoError(t, err) - - m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} - metadata, err := m.Marshal() - require.NoError(t, err) - hash := []byte{1, 2, 3} - - testcases := map[string]struct { - snapshot *abci.Snapshot - result abci.ResponseOfferSnapshot_Result - }{ - "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, - "invalid format": {&abci.Snapshot{ - Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, - "incorrect chunk count": {&abci.Snapshot{ - Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 2, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "no chunks": {&abci.Snapshot{ - Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: metadata, - }, abci.ResponseOfferSnapshot_REJECT}, - "invalid metadata serialization": {&abci.Snapshot{ - Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, - }, abci.ResponseOfferSnapshot_REJECT}, - } - for name, tc := range testcases { - tc := tc - t.Run(name, func(t *testing.T) { - resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) - assert.Equal(t, tc.result, resp.Result) - }) - } - - // Offering a snapshot after one has been accepted should error - resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 1, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) - - resp = app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ - Height: 2, - Format: snapshottypes.CurrentFormat, - Chunks: 3, - Hash: []byte{1, 2, 3}, - Metadata: metadata, - }}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) -} - -func TestApplySnapshotChunk(t 
*testing.T) { - setupConfig1 := &setupConfig{ - blocks: 4, - blockTxs: 10, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - source, err := setupBaseAppWithSnapshots(t, setupConfig1) - require.NoError(t, err) - - setupConfig2 := &setupConfig{ - blocks: 0, - blockTxs: 0, - snapshotInterval: 2, - snapshotKeepRecent: 2, - pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), - } - target, err := setupBaseAppWithSnapshots(t, setupConfig2) - require.NoError(t, err) - - // Fetch latest snapshot to restore - respList := source.ListSnapshots(abci.RequestListSnapshots{}) - require.NotEmpty(t, respList.Snapshots) - snapshot := respList.Snapshots[0] - - // Make sure the snapshot has at least 3 chunks - require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks") - - // Begin a snapshot restoration in the target - respOffer := target.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: snapshot}) - require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer) - - // We should be able to pass an invalid chunk and get a verify failure, before reapplying it. - respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ - Index: 0, - Chunk: []byte{9}, - Sender: "sender", - }) - require.Equal(t, abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_RETRY, - RefetchChunks: []uint32{0}, - RejectSenders: []string{"sender"}, - }, respApply) - - // Fetch each chunk from the source and apply it to the target - for index := uint32(0); index < snapshot.Chunks; index++ { - respChunk := source.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ - Height: snapshot.Height, - Format: snapshot.Format, - Chunk: index, - }) - require.NotNil(t, respChunk.Chunk) - respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{ - Index: index, - Chunk: respChunk.Chunk, - }) - require.Equal(t, abci.ResponseApplySnapshotChunk{ - Result: abci.ResponseApplySnapshotChunk_ACCEPT, - }, respApply) - } - - // The target should now have the same hash as the source - assert.Equal(t, source.LastCommitID(), target.LastCommitID()) -} - -// NOTE: represents a new custom router for testing purposes of WithRouter() -type testCustomRouter struct { - routes sync.Map -} - -func (rtr *testCustomRouter) AddRoute(route sdk.Route) sdk.Router { - rtr.routes.Store(route.Path(), route.Handler()) - return rtr -} - -func (rtr *testCustomRouter) Route(ctx sdk.Context, path string) sdk.Handler { - if v, ok := rtr.routes.Load(path); ok { - if h, ok := v.(sdk.Handler); ok { - return h - } - } - return nil -} - -func TestWithRouter(t *testing.T) { - // test increments in the ante - anteKey := []byte("ante-key") - anteOpt := func(bapp *BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } - - // test increments in the handler - deliverKey := []byte("deliver-key") - routerOpt := func(bapp *BaseApp) { - bapp.SetRouter(&testCustomRouter{routes: sync.Map{}}) - r := sdk.NewRoute(routeMsgCounter, handlerMsgCounter(t, capKey1, deliverKey)) - bapp.Router().AddRoute(r) - } - - app := setupBaseApp(t, anteOpt, routerOpt) - app.InitChain(abci.RequestInitChain{}) - - // Create same codec used in txDecoder - codec := codec.NewLegacyAmino() - registerTestCodec(codec) - - nBlocks := 3 - txPerHeight := 5 - - for blockN := 0; blockN < nBlocks; blockN++ { - header := tmproto.Header{Height: int64(blockN) + 1} - app.BeginBlock(abci.RequestBeginBlock{Header: header}) - - 
for i := 0; i < txPerHeight; i++ { - counter := int64(blockN*txPerHeight + i) - tx := newTxCounter(counter, counter) - - txBytes, err := codec.Marshal(tx) - require.NoError(t, err) - - res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) - require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) - } - - app.EndBlock(abci.RequestEndBlock{}) - app.Commit() - } -} - -func TestBaseApp_EndBlock(t *testing.T) { - db := dbm.NewMemDB() - name := t.Name() - logger := defaultLogger() - - cp := &abci.ConsensusParams{ - Block: &abci.BlockParams{ - MaxGas: 5000000, - }, - } - - app := NewBaseApp(name, logger, db, nil) - app.SetParamStore(¶mStore{db: dbm.NewMemDB()}) - app.InitChain(abci.RequestInitChain{ - ConsensusParams: cp, - }) - - app.SetEndBlocker(func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { - return abci.ResponseEndBlock{ - ValidatorUpdates: []abci.ValidatorUpdate{ - {Power: 100}, - }, - } - }) - app.Seal() - - res := app.EndBlock(abci.RequestEndBlock{}) - require.Len(t, res.GetValidatorUpdates(), 1) - require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power) - require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas) +func testLoadVersionHelper(t *testing.T, app *BaseApp, expectedHeight int64, expectedID storetypes.CommitID) { + lastHeight := app.LastBlockHeight() + lastID := app.LastCommitID() + require.Equal(t, expectedHeight, lastHeight) + require.Equal(t, expectedID, lastID) } diff --git a/baseapp/block_gas_test.go b/baseapp/block_gas_test.go index 815fff51fc4b..280b3bd4f2dc 100644 --- a/baseapp/block_gas_test.go +++ b/baseapp/block_gas_test.go @@ -1,7 +1,7 @@ package baseapp_test import ( - "fmt" + "context" "math" "testing" @@ -12,9 +12,8 @@ import ( tmproto "github.com/tendermint/tendermint/proto/tendermint/types" dbm "github.com/tendermint/tm-db" - bankmodulev1 "cosmossdk.io/api/cosmos/bank/module/v1" "cosmossdk.io/depinject" - "github.com/cosmos/cosmos-sdk/baseapp" + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" "github.com/cosmos/cosmos-sdk/client" "github.com/cosmos/cosmos-sdk/client/tx" "github.com/cosmos/cosmos-sdk/codec" @@ -39,6 +38,22 @@ import ( var blockMaxGas = uint64(simtestutil.DefaultConsensusParams.Block.MaxGas) +type BlockGasImpl struct { + panicTx bool + gasToConsume uint64 + key store.StoreKey +} + +func (m BlockGasImpl) Set(ctx context.Context, msg *baseapptestutil.MsgKeyValue) (*baseapptestutil.MsgCreateKeyValueResponse, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.KVStore(m.key).Set(msg.Key, msg.Value) + sdkCtx.GasMeter().ConsumeGas(m.gasToConsume, "TestMsg") + if m.panicTx { + panic("panic in tx execution") + } + return &baseapptestutil.MsgCreateKeyValueResponse{}, nil +} + func TestBaseApp_BlockGas(t *testing.T) { testcases := []struct { name string @@ -68,25 +83,7 @@ func TestBaseApp_BlockGas(t *testing.T) { err error ) - appConfig := depinject.Configs(makeTestConfig(), - depinject.ProvideInModule(banktypes.ModuleName, - func(_ *bankmodulev1.Module, key *store.KVStoreKey) runtime.BaseAppOption { - return func(app *baseapp.BaseApp) { - route := (&testdata.TestMsg{}).Route() - app.Router().AddRoute(sdk.NewRoute(route, func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - _, ok := msg.(*testdata.TestMsg) - if !ok { - return &sdk.Result{}, fmt.Errorf("Wrong Msg type, expected %T, got %T", (*testdata.TestMsg)(nil), msg) - } - ctx.KVStore(key).Set([]byte("ok"), []byte("ok")) - ctx.GasMeter().ConsumeGas(tc.gasToConsume, "TestMsg") - if tc.panicTx { - panic("panic in tx execution") - } - 
return &sdk.Result{}, nil - })) - } - })) + appConfig := depinject.Configs(makeTestConfig()) err = depinject.Inject(appConfig, &bankKeeper, @@ -105,9 +102,13 @@ func TestBaseApp_BlockGas(t *testing.T) { require.NoError(t, err) t.Run(tc.name, func(t *testing.T) { - interfaceRegistry.RegisterImplementations((*sdk.Msg)(nil), - &testdata.TestMsg{}, - ) + baseapptestutil.RegisterInterfaces(interfaceRegistry) + baseapptestutil.RegisterKeyValueServer(bapp.MsgServiceRouter(), BlockGasImpl{ + panicTx: tc.panicTx, + gasToConsume: tc.gasToConsume, + key: bapp.UnsafeFindStoreKey(banktypes.ModuleName), + }) + genState := GenesisStateWithSingleValidator(t, cdc, appBuilder) stateBytes, err := tmjson.MarshalIndent(genState, "", " ") require.NoError(t, err) @@ -134,7 +135,11 @@ func TestBaseApp_BlockGas(t *testing.T) { require.Equal(t, uint64(0), seq) // msg and signatures - msg := testdata.NewTestMsg(addr1) + msg := &baseapptestutil.MsgKeyValue{ + Key: []byte("ok"), + Value: []byte("ok"), + Signer: addr1.String(), + } txBuilder := txConfig.NewTxBuilder() @@ -166,7 +171,7 @@ func TestBaseApp_BlockGas(t *testing.T) { require.Equal(t, []byte("ok"), okValue) } // check block gas is always consumed - baseGas := uint64(52744) // baseGas is the gas consumed before tx msg + baseGas := uint64(52864) // baseGas is the gas consumed before tx msg expGasConsumed := addUint64Saturating(tc.gasToConsume, baseGas) if expGasConsumed > txtypes.MaxGasWanted { // capped by gasLimit diff --git a/baseapp/deliver_tx_test.go b/baseapp/deliver_tx_test.go new file mode 100644 index 000000000000..601a5b642a57 --- /dev/null +++ b/baseapp/deliver_tx_test.go @@ -0,0 +1,2186 @@ +package baseapp_test + +import ( + "bytes" + "context" + "encoding/binary" + "encoding/json" + "fmt" + "math/rand" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "testing" + "time" + "unsafe" + + "cosmossdk.io/depinject" + "github.com/cosmos/cosmos-sdk/baseapp" + baseapptestutil "github.com/cosmos/cosmos-sdk/baseapp/testutil" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" + pruningtypes "github.com/cosmos/cosmos-sdk/pruning/types" + "github.com/cosmos/cosmos-sdk/runtime" + "github.com/cosmos/cosmos-sdk/snapshots" + snapshottypes "github.com/cosmos/cosmos-sdk/snapshots/types" + "github.com/cosmos/cosmos-sdk/store/rootmulti" + storetypes "github.com/cosmos/cosmos-sdk/store/types" + "github.com/cosmos/cosmos-sdk/testutil" + "github.com/cosmos/cosmos-sdk/testutil/testdata" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/auth/signing" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + "github.com/gogo/protobuf/jsonpb" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + dbm "github.com/tendermint/tm-db" +) + +var ( + capKey1 = sdk.NewKVStoreKey("key1") + capKey2 = sdk.NewKVStoreKey("key2") + + // testTxPriority is the CheckTx priority that we set in the test + // antehandler. 
+	testTxPriority = int64(42)
+)
+
+type setupConfig struct {
+	blocks             uint64
+	blockTxs           int
+	snapshotInterval   uint64
+	snapshotKeepRecent uint32
+	pruningOpts        pruningtypes.PruningOptions
+}
+
+func defaultLogger() log.Logger {
+	return log.NewTMLogger(log.NewSyncWriter(os.Stdout)).With("module", "sdk/app")
+}
+
+// simple one store baseapp
+func setupBaseApp(t *testing.T, options ...func(*baseapp.BaseApp)) *baseapp.BaseApp {
+	cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+	baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry())
+	txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes)
+
+	logger := defaultLogger()
+	db := dbm.NewMemDB()
+	app := baseapp.NewBaseApp(t.Name(), logger, db, txConfig.TxDecoder(), options...)
+	require.Equal(t, t.Name(), app.Name())
+
+	app.MountStores(capKey1, capKey2)
+	app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
+
+	// stores are mounted
+	err := app.LoadLatestVersion()
+	require.Nil(t, err)
+	return app
+}
+
+// simple one store baseapp with data and snapshots. Each tx is 1 MB in size (uncompressed).
+func setupBaseAppWithSnapshots(t *testing.T, config *setupConfig) (*baseapp.BaseApp, error) {
+	snapshotTimeout := 1 * time.Minute
+	snapshotStore, err := snapshots.NewStore(dbm.NewMemDB(), testutil.GetTempDir(t))
+	require.NoError(t, err)
+
+	app := setupBaseApp(t, baseapp.SetSnapshot(snapshotStore, snapshottypes.NewSnapshotOptions(config.snapshotInterval, uint32(config.snapshotKeepRecent))), baseapp.SetPruning(config.pruningOpts))
+
+	registry := codectypes.NewInterfaceRegistry()
+	cdc := codec.NewProtoCodec(registry)
+	baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry())
+	app.SetMsgServiceRouter(baseapp.NewMsgServiceRouter())
+	app.SetInterfaceRegistry(registry)
+
+	baseapptestutil.RegisterKeyValueServer(app.MsgServiceRouter(), MsgKeyValueImpl{})
+
+	// patch in TxConfig instead of using an output from x/auth/tx
+	txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes)
+	// set the TxDecoder in the BaseApp for minimal tx simulations
+	app.SetTxDecoder(txConfig.TxDecoder())
+
+	app.InitChain(abci.RequestInitChain{})
+
+	r := rand.New(rand.NewSource(3920758213583))
+	keyCounter := 0
+	for height := int64(1); height <= int64(config.blocks); height++ {
+		app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: height}})
+		for txNum := 0; txNum < config.blockTxs; txNum++ {
+			msgs := []sdk.Msg{}
+			for msgNum := 0; msgNum < 100; msgNum++ {
+				key := []byte(fmt.Sprintf("%v", keyCounter))
+				value := make([]byte, 10000)
+				_, err := r.Read(value)
+				require.NoError(t, err)
+				msgs = append(msgs, &baseapptestutil.MsgKeyValue{Key: key, Value: value})
+				keyCounter++
+			}
+
+			builder := txConfig.NewTxBuilder()
+			builder.SetMsgs(msgs...)
+
+			txBytes, err := txConfig.TxEncoder()(builder.GetTx())
+			require.NoError(t, err)
+			resp := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+			require.True(t, resp.IsOK(), "%v", resp.String())
+		}
+		app.EndBlock(abci.RequestEndBlock{Height: height})
+		app.Commit()
+
+		// Wait for snapshot to be taken, since it happens asynchronously.
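+		// A snapshot is only expected at heights that are a multiple of
+		// config.snapshotInterval, and it is written in the background after
+		// Commit, so poll the snapshot store until it shows up or
+		// snapshotTimeout elapses.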
+ if config.snapshotInterval > 0 && uint64(height)%config.snapshotInterval == 0 { + start := time.Now() + for { + if time.Since(start) > snapshotTimeout { + t.Errorf("timed out waiting for snapshot after %v", snapshotTimeout) + } + snapshot, err := snapshotStore.Get(uint64(height), snapshottypes.CurrentFormat) + require.NoError(t, err) + if snapshot != nil { + break + } + time.Sleep(100 * time.Millisecond) + } + } + } + + return app, nil +} + +// Test that we can make commits and then reload old versions. +// Test that LoadLatestVersion actually does. +func TestLoadVersion(t *testing.T) { + logger := defaultLogger() + pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + // make a cap key and mount the store + err := app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + + emptyCommitID := storetypes.CommitID{} + + // fresh store has zero/empty last commit + lastHeight := app.LastBlockHeight() + lastID := app.LastCommitID() + require.Equal(t, int64(0), lastHeight) + require.Equal(t, emptyCommitID, lastID) + + // execute a block, collect commit ID + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res := app.Commit() + commitID1 := storetypes.CommitID{Version: 1, Hash: res.Data} + + // execute a block, collect commit ID + header = tmproto.Header{Height: 2} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res = app.Commit() + commitID2 := storetypes.CommitID{Version: 2, Hash: res.Data} + + // reload with LoadLatestVersion + app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + app.MountStores() + err = app.LoadLatestVersion() + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(2), commitID2) + + // reload with LoadVersion, see if you can commit the same block and get + // the same result + app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + err = app.LoadVersion(1) + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(1), commitID1) + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + testLoadVersionHelper(t, app, int64(2), commitID2) +} + +func useDefaultLoader(app *baseapp.BaseApp) { + app.SetStoreLoader(baseapp.DefaultStoreLoader) +} + +func initStore(t *testing.T, db dbm.DB, storeKey string, k, v []byte) { + rs := rootmulti.NewStore(db, log.NewNopLogger()) + rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + key := sdk.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, int64(0), rs.LastCommitID().Version) + + // write some data in substore + kv, _ := rs.GetStore(key).(storetypes.KVStore) + require.NotNil(t, kv) + kv.Set(k, v) + commitID := rs.Commit() + require.Equal(t, int64(1), commitID.Version) +} + +func checkStore(t *testing.T, db dbm.DB, ver int64, storeKey string, k, v []byte) { + rs := rootmulti.NewStore(db, log.NewNopLogger()) + rs.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) + key := sdk.NewKVStoreKey(storeKey) + rs.MountStoreWithDB(key, storetypes.StoreTypeIAVL, nil) + err := rs.LoadLatestVersion() + require.Nil(t, err) + require.Equal(t, ver, rs.LastCommitID().Version) + + // query data in substore + kv, _ := rs.GetStore(key).(storetypes.KVStore) + require.NotNil(t, kv) + require.Equal(t, v, kv.Get(k)) +} + +// Test that we can make commits 
and then reload old versions. +// Test that LoadLatestVersion actually does. +func TestSetLoader(t *testing.T) { + cases := map[string]struct { + setLoader func(*baseapp.BaseApp) + origStoreKey string + loadStoreKey string + }{ + "don't set loader": { + origStoreKey: "foo", + loadStoreKey: "foo", + }, + "default loader": { + setLoader: useDefaultLoader, + origStoreKey: "foo", + loadStoreKey: "foo", + }, + } + + k := []byte("key") + v := []byte("value") + + for name, tc := range cases { + tc := tc + t.Run(name, func(t *testing.T) { + // prepare a db with some data + db := dbm.NewMemDB() + initStore(t, db, tc.origStoreKey, k, v) + + // load the app with the existing db + opts := []func(*baseapp.BaseApp){baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing))} + if tc.setLoader != nil { + opts = append(opts, tc.setLoader) + } + app := baseapp.NewBaseApp(t.Name(), defaultLogger(), db, nil, opts...) + app.MountStores(sdk.NewKVStoreKey(tc.loadStoreKey)) + err := app.LoadLatestVersion() + require.Nil(t, err) + + // "execute" one block + app.BeginBlock(abci.RequestBeginBlock{Header: tmproto.Header{Height: 2}}) + res := app.Commit() + require.NotNil(t, res.Data) + + // check db is properly updated + checkStore(t, db, 2, tc.loadStoreKey, k, v) + checkStore(t, db, 2, tc.loadStoreKey, []byte("foo"), nil) + }) + } +} + +func TestVersionSetterGetter(t *testing.T) { + logger := defaultLogger() + pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningDefault)) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + require.Equal(t, "", app.Version()) + res := app.Query(abci.RequestQuery{Path: "app/version"}) + require.True(t, res.IsOK()) + require.Equal(t, "", string(res.Value)) + + versionString := "1.0.0" + app.SetVersion(versionString) + require.Equal(t, versionString, app.Version()) + res = app.Query(abci.RequestQuery{Path: "app/version"}) + require.True(t, res.IsOK()) + require.Equal(t, versionString, string(res.Value)) +} + +func TestLoadVersionInvalid(t *testing.T) { + logger := log.NewNopLogger() + pruningOpt := baseapp.SetPruning(pruningtypes.NewPruningOptions(pruningtypes.PruningNothing)) + db := dbm.NewMemDB() + name := t.Name() + app := baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + err := app.LoadLatestVersion() + require.Nil(t, err) + + // require error when loading an invalid version + err = app.LoadVersion(-1) + require.Error(t, err) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + res := app.Commit() + commitID1 := storetypes.CommitID{Version: 1, Hash: res.Data} + + // create a new app with the stores mounted under the same cap key + app = baseapp.NewBaseApp(name, logger, db, nil, pruningOpt) + + // require we can load the latest version + err = app.LoadVersion(1) + require.Nil(t, err) + testLoadVersionHelper(t, app, int64(1), commitID1) + + // require error when loading an invalid version + err = app.LoadVersion(2) + require.Error(t, err) +} + +func testLoadVersionHelper(t *testing.T, app *baseapp.BaseApp, expectedHeight int64, expectedID storetypes.CommitID) { + lastHeight := app.LastBlockHeight() + lastID := app.LastCommitID() + require.Equal(t, expectedHeight, lastHeight) + require.Equal(t, expectedID, lastID) +} + +func TestOptionFunction(t *testing.T) { + logger := defaultLogger() + db := dbm.NewMemDB() + bap := baseapp.NewBaseApp("starting name", logger, db, nil, testChangeNameHelper("new name")) + require.Equal(t, 
bap.Name(), "new name", "BaseApp should have had name changed via option function") +} + +func testChangeNameHelper(name string) func(*baseapp.BaseApp) { + return func(bap *baseapp.BaseApp) { + bap.SetName(name) + } +} + +// Test that Info returns the latest committed state. +func TestInfo(t *testing.T) { + app := setupBaseApp(t) + + // ----- test an empty response ------- + reqInfo := abci.RequestInfo{} + res := app.Info(reqInfo) + + // should be empty + assert.Equal(t, "", res.Version) + assert.Equal(t, t.Name(), res.GetData()) + assert.Equal(t, int64(0), res.LastBlockHeight) + require.Equal(t, []uint8(nil), res.LastBlockAppHash) + require.Equal(t, app.AppVersion(), res.AppVersion) + // ----- test a proper response ------- + // TODO +} + +func TestBaseAppOptionSeal(t *testing.T) { + app := setupBaseApp(t) + + require.Panics(t, func() { + app.SetName("") + }) + require.Panics(t, func() { + app.SetVersion("") + }) + require.Panics(t, func() { + app.SetDB(nil) + }) + require.Panics(t, func() { + app.SetCMS(nil) + }) + require.Panics(t, func() { + app.SetInitChainer(nil) + }) + require.Panics(t, func() { + app.SetBeginBlocker(nil) + }) + require.Panics(t, func() { + app.SetEndBlocker(nil) + }) + require.Panics(t, func() { + app.SetAnteHandler(nil) + }) + require.Panics(t, func() { + app.SetAddrPeerFilter(nil) + }) + require.Panics(t, func() { + app.SetIDPeerFilter(nil) + }) + require.Panics(t, func() { + app.SetFauxMerkleMode() + }) +} + +func TestInitChainer(t *testing.T) { + name := t.Name() + // keep the db and logger ourselves so + // we can reload the same app later + db := dbm.NewMemDB() + logger := defaultLogger() + app := baseapp.NewBaseApp(name, logger, db, nil) + capKey := sdk.NewKVStoreKey("main") + capKey2 := sdk.NewKVStoreKey("key2") + app.MountStores(capKey, capKey2) + + // set a value in the store on init chain + key, value := []byte("hello"), []byte("goodbye") + var initChainer sdk.InitChainer = func(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { + store := ctx.KVStore(capKey) + store.Set(key, value) + return abci.ResponseInitChain{} + } + + query := abci.RequestQuery{ + Path: "/store/main/key", + Data: key, + } + + // initChainer is nil - nothing happens + app.InitChain(abci.RequestInitChain{}) + res := app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // set initChainer and try again - should see the value + app.SetInitChainer(initChainer) + + // stores are mounted and private members are set - sealing baseapp + err := app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(0), app.LastBlockHeight()) + + initChainRes := app.InitChain(abci.RequestInitChain{AppStateBytes: []byte("{}"), ChainId: "test-chain-id"}) // must have valid JSON genesis file, even if empty + + // The AppHash returned by a new chain is the sha256 hash of "". 
+ // $ echo -n '' | sha256sum + // e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 + require.Equal( + t, + []byte{0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55}, + initChainRes.AppHash, + ) + + // assert that chainID is set correctly in InitChain + + chainID := getDeliverStateCtx(app).ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in deliverState not set correctly in InitChain") + + chainID = getCheckStateCtx(app).ChainID() + require.Equal(t, "test-chain-id", chainID, "ChainID in checkState not set correctly in InitChain") + + app.Commit() + res = app.Query(query) + require.Equal(t, int64(1), app.LastBlockHeight()) + require.Equal(t, value, res.Value) + + // reload app + app = baseapp.NewBaseApp(name, logger, db, nil) + app.SetInitChainer(initChainer) + app.MountStores(capKey, capKey2) + err = app.LoadLatestVersion() // needed to make stores non-nil + require.Nil(t, err) + require.Equal(t, int64(1), app.LastBlockHeight()) + + // ensure we can still query after reloading + res = app.Query(query) + require.Equal(t, value, res.Value) + + // commit and ensure we can still query + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + + res = app.Query(query) + require.Equal(t, value, res.Value) +} + +func TestInitChain_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := defaultLogger() + app := baseapp.NewBaseApp(name, logger, db, nil) + + app.InitChain( + abci.RequestInitChain{ + InitialHeight: 3, + }, + ) + app.Commit() + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +func TestBeginBlock_WithInitialHeight(t *testing.T) { + name := t.Name() + db := dbm.NewMemDB() + logger := defaultLogger() + app := baseapp.NewBaseApp(name, logger, db, nil) + + app.InitChain( + abci.RequestInitChain{ + InitialHeight: 3, + }, + ) + + require.PanicsWithError(t, "invalid height: 4; expected: 3", func() { + app.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + Height: 4, + }, + }) + }) + + app.BeginBlock(abci.RequestBeginBlock{ + Header: tmproto.Header{ + Height: 3, + }, + }) + app.Commit() + + require.Equal(t, int64(3), app.LastBlockHeight()) +} + +// Number of messages doesn't matter to CheckTx. +func TestMultiMsgCheckTx(t *testing.T) { + // TODO: ensure we get the same results + // with one message or many +} + +// Interleave calls to Check and Deliver and ensure +// that there is no cross-talk. Check sees results of the previous Check calls +// and Deliver sees that of the previous Deliver calls, but they don't see eachother. 
+func TestConcurrentCheckDeliver(t *testing.T) { + // TODO +} + +func TestGRPCQuery(t *testing.T) { + grpcQueryOpt := func(bapp *baseapp.BaseApp) { + testdata.RegisterQueryServer( + bapp.GRPCQueryRouter(), + testdata.QueryImpl{}, + ) + } + + app := setupBaseApp(t, grpcQueryOpt) + app.GRPCQueryRouter().SetInterfaceRegistry(codectypes.NewInterfaceRegistry()) + + app.InitChain(abci.RequestInitChain{}) + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + app.Commit() + + req := testdata.SayHelloRequest{Name: "foo"} + reqBz, err := req.Marshal() + require.NoError(t, err) + + reqQuery := abci.RequestQuery{ + Data: reqBz, + Path: "/testdata.Query/SayHello", + } + + resQuery := app.Query(reqQuery) + + require.Equal(t, abci.CodeTypeOK, resQuery.Code, resQuery) + + var res testdata.SayHelloResponse + err = res.Unmarshal(resQuery.Value) + require.NoError(t, err) + require.Equal(t, "Hello foo!", res.Greeting) +} + +// Test p2p filter queries +func TestP2PQuery(t *testing.T) { + addrPeerFilterOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAddrPeerFilter(func(addrport string) abci.ResponseQuery { + require.Equal(t, "1.1.1.1:8000", addrport) + return abci.ResponseQuery{Code: uint32(3)} + }) + } + + idPeerFilterOpt := func(bapp *baseapp.BaseApp) { + bapp.SetIDPeerFilter(func(id string) abci.ResponseQuery { + require.Equal(t, "testid", id) + return abci.ResponseQuery{Code: uint32(4)} + }) + } + + app := setupBaseApp(t, addrPeerFilterOpt, idPeerFilterOpt) + + addrQuery := abci.RequestQuery{ + Path: "/p2p/filter/addr/1.1.1.1:8000", + } + res := app.Query(addrQuery) + require.Equal(t, uint32(3), res.Code) + + idQuery := abci.RequestQuery{ + Path: "/p2p/filter/id/testid", + } + res = app.Query(idQuery) + require.Equal(t, uint32(4), res.Code) +} + +func TestListSnapshots(t *testing.T) { + setupConfig := &setupConfig{ + blocks: 5, + blockTxs: 4, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + + app, err := setupBaseAppWithSnapshots(t, setupConfig) + require.NoError(t, err) + + resp := app.ListSnapshots(abci.RequestListSnapshots{}) + for _, s := range resp.Snapshots { + assert.NotEmpty(t, s.Hash) + assert.NotEmpty(t, s.Metadata) + s.Hash = nil + s.Metadata = nil + } + assert.Equal(t, abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{ + {Height: 4, Format: snapshottypes.CurrentFormat, Chunks: 2}, + {Height: 2, Format: snapshottypes.CurrentFormat, Chunks: 1}, + }}, resp) +} + +func TestSnapshotWithPruning(t *testing.T) { + testcases := map[string]struct { + config *setupConfig + expectedSnapshots []*abci.Snapshot + expectedErr error + }{ + "prune nothing with snapshot": { + config: &setupConfig{ + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 1, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 20, Format: 2, Chunks: 5}, + }, + }, + "prune everything with snapshot": { + config: &setupConfig{ + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 1, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningEverything), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 20, Format: 2, Chunks: 5}, + }, + }, + "default pruning with snapshot": { + config: &setupConfig{ + blocks: 20, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 1, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningDefault), + }, + 
expectedSnapshots: []*abci.Snapshot{ + {Height: 20, Format: 2, Chunks: 5}, + }, + }, + "custom": { + config: &setupConfig{ + blocks: 25, + blockTxs: 2, + snapshotInterval: 5, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewCustomPruningOptions(12, 12), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 25, Format: 2, Chunks: 6}, + {Height: 20, Format: 2, Chunks: 5}, + }, + }, + "no snapshots": { + config: &setupConfig{ + blocks: 10, + blockTxs: 2, + snapshotInterval: 0, // 0 implies disable snapshots + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + }, + expectedSnapshots: []*abci.Snapshot{}, + }, + "keep all snapshots": { + config: &setupConfig{ + blocks: 10, + blockTxs: 2, + snapshotInterval: 3, + snapshotKeepRecent: 0, // 0 implies keep all snapshots + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + }, + expectedSnapshots: []*abci.Snapshot{ + {Height: 9, Format: 2, Chunks: 2}, + {Height: 6, Format: 2, Chunks: 2}, + {Height: 3, Format: 2, Chunks: 1}, + }, + }, + } + + for name, tc := range testcases { + t.Run(name, func(t *testing.T) { + app, err := setupBaseAppWithSnapshots(t, tc.config) + + if tc.expectedErr != nil { + require.Error(t, err) + require.Equal(t, tc.expectedErr.Error(), err.Error()) + return + } + require.NoError(t, err) + + resp := app.ListSnapshots(abci.RequestListSnapshots{}) + for _, s := range resp.Snapshots { + assert.NotEmpty(t, s.Hash) + assert.NotEmpty(t, s.Metadata) + s.Hash = nil + s.Metadata = nil + } + fmt.Println(resp) + assert.Equal(t, abci.ResponseListSnapshots{Snapshots: tc.expectedSnapshots}, resp) + + // Validate that heights were pruned correctly by querying the state at the last height that should be present relative to latest + // and the first height that should be pruned. + // + // Exceptions: + // * Prune nothing: should be able to query all heights (we only test first and latest) + // * Prune default: should be able to query all heights (we only test first and latest) + // * The reason for default behaving this way is that we only commit 20 heights but default has 100_000 keep-recent + var lastExistingHeight int64 + if tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { + lastExistingHeight = 1 + } else { + // Integer division rounds down so by multiplying back we get the last height at which we pruned + lastExistingHeight = int64((tc.config.blocks/tc.config.pruningOpts.Interval)*tc.config.pruningOpts.Interval - tc.config.pruningOpts.KeepRecent) + } + + // Query 1 + res := app.Query(abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight}) + require.NotNil(t, res, "height: %d", lastExistingHeight) + require.NotNil(t, res.Value, "height: %d", lastExistingHeight) + + // Query 2 + res = app.Query(abci.RequestQuery{Path: fmt.Sprintf("/store/%s/key", capKey2.Name()), Data: []byte("0"), Height: lastExistingHeight - 1}) + require.NotNil(t, res, "height: %d", lastExistingHeight-1) + if tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningNothing || tc.config.pruningOpts.GetPruningStrategy() == pruningtypes.PruningDefault { + // With prune nothing or default, we query height 0 which translates to the latest height. 
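+				// Other pruning strategies will already have pruned this height, so the
+				// returned value is only asserted for strategies that keep historical state.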
+ require.NotNil(t, res.Value, "height: %d", lastExistingHeight-1) + } + }) + } +} + +func TestLoadSnapshotChunk(t *testing.T) { + setupConfig := &setupConfig{ + blocks: 2, + blockTxs: 5, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + app, err := setupBaseAppWithSnapshots(t, setupConfig) + require.NoError(t, err) + + testcases := map[string]struct { + height uint64 + format uint32 + chunk uint32 + expectEmpty bool + }{ + "Existing snapshot": {2, snapshottypes.CurrentFormat, 1, false}, + "Missing height": {100, snapshottypes.CurrentFormat, 1, true}, + "Missing format": {2, 3, 1, true}, + "Missing chunk": {2, snapshottypes.CurrentFormat, 9, true}, + "Zero height": {0, snapshottypes.CurrentFormat, 1, true}, + "Zero format": {2, 0, 1, true}, + "Zero chunk": {2, snapshottypes.CurrentFormat, 0, false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + resp := app.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{ + Height: tc.height, + Format: tc.format, + Chunk: tc.chunk, + }) + if tc.expectEmpty { + assert.Equal(t, abci.ResponseLoadSnapshotChunk{}, resp) + return + } + assert.NotEmpty(t, resp.Chunk) + }) + } +} + +func TestOfferSnapshot_Errors(t *testing.T) { + // Set up app before test cases, since it's fairly expensive. + setupConfig := &setupConfig{ + blocks: 0, + blockTxs: 0, + snapshotInterval: 2, + snapshotKeepRecent: 2, + pruningOpts: pruningtypes.NewPruningOptions(pruningtypes.PruningNothing), + } + app, err := setupBaseAppWithSnapshots(t, setupConfig) + require.NoError(t, err) + + m := snapshottypes.Metadata{ChunkHashes: [][]byte{{1}, {2}, {3}}} + metadata, err := m.Marshal() + require.NoError(t, err) + hash := []byte{1, 2, 3} + + testcases := map[string]struct { + snapshot *abci.Snapshot + result abci.ResponseOfferSnapshot_Result + }{ + "nil snapshot": {nil, abci.ResponseOfferSnapshot_REJECT}, + "invalid format": {&abci.Snapshot{ + Height: 1, Format: 9, Chunks: 3, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT_FORMAT}, + "incorrect chunk count": {&abci.Snapshot{ + Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 2, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "no chunks": {&abci.Snapshot{ + Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: metadata, + }, abci.ResponseOfferSnapshot_REJECT}, + "invalid metadata serialization": {&abci.Snapshot{ + Height: 1, Format: snapshottypes.CurrentFormat, Chunks: 0, Hash: hash, Metadata: []byte{3, 1, 4}, + }, abci.ResponseOfferSnapshot_REJECT}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: tc.snapshot}) + assert.Equal(t, tc.result, resp.Result) + }) + } + + // Offering a snapshot after one has been accepted should error + resp := app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 1, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, resp) + + resp = app.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: &abci.Snapshot{ + Height: 2, + Format: snapshottypes.CurrentFormat, + Chunks: 3, + Hash: []byte{1, 2, 3}, + Metadata: metadata, + }}) + require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}, resp) +} + +func TestApplySnapshotChunk(t 
*testing.T) {
+	setupConfig1 := &setupConfig{
+		blocks:             4,
+		blockTxs:           10,
+		snapshotInterval:   2,
+		snapshotKeepRecent: 2,
+		pruningOpts:        pruningtypes.NewPruningOptions(pruningtypes.PruningNothing),
+	}
+	source, err := setupBaseAppWithSnapshots(t, setupConfig1)
+	require.NoError(t, err)
+
+	setupConfig2 := &setupConfig{
+		blocks:             0,
+		blockTxs:           0,
+		snapshotInterval:   2,
+		snapshotKeepRecent: 2,
+		pruningOpts:        pruningtypes.NewPruningOptions(pruningtypes.PruningNothing),
+	}
+	target, err := setupBaseAppWithSnapshots(t, setupConfig2)
+	require.NoError(t, err)
+
+	// Fetch latest snapshot to restore
+	respList := source.ListSnapshots(abci.RequestListSnapshots{})
+	require.NotEmpty(t, respList.Snapshots)
+	snapshot := respList.Snapshots[0]
+
+	// Make sure the snapshot has at least 3 chunks
+	require.GreaterOrEqual(t, snapshot.Chunks, uint32(3), "Not enough snapshot chunks")
+
+	// Begin a snapshot restoration in the target
+	respOffer := target.OfferSnapshot(abci.RequestOfferSnapshot{Snapshot: snapshot})
+	require.Equal(t, abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, respOffer)
+
+	// We should be able to pass an invalid chunk and get a verify failure, before reapplying it.
+	respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{
+		Index:  0,
+		Chunk:  []byte{9},
+		Sender: "sender",
+	})
+	require.Equal(t, abci.ResponseApplySnapshotChunk{
+		Result:        abci.ResponseApplySnapshotChunk_RETRY,
+		RefetchChunks: []uint32{0},
+		RejectSenders: []string{"sender"},
+	}, respApply)
+
+	// Fetch each chunk from the source and apply it to the target
+	for index := uint32(0); index < snapshot.Chunks; index++ {
+		respChunk := source.LoadSnapshotChunk(abci.RequestLoadSnapshotChunk{
+			Height: snapshot.Height,
+			Format: snapshot.Format,
+			Chunk:  index,
+		})
+		require.NotNil(t, respChunk.Chunk)
+		respApply := target.ApplySnapshotChunk(abci.RequestApplySnapshotChunk{
+			Index: index,
+			Chunk: respChunk.Chunk,
+		})
+		require.Equal(t, abci.ResponseApplySnapshotChunk{
+			Result: abci.ResponseApplySnapshotChunk_ACCEPT,
+		}, respApply)
+	}
+
+	// The target should now have the same hash as the source
+	assert.Equal(t, source.LastCommitID(), target.LastCommitID())
+}
+
+func TestBaseApp_EndBlock(t *testing.T) {
+	db := dbm.NewMemDB()
+	name := t.Name()
+	logger := defaultLogger()
+
+	cp := &abci.ConsensusParams{
+		Block: &abci.BlockParams{
+			MaxGas: 5000000,
+		},
+	}
+
+	app := baseapp.NewBaseApp(name, logger, db, nil)
+	app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
+	app.InitChain(abci.RequestInitChain{
+		ConsensusParams: cp,
+	})
+
+	app.SetEndBlocker(func(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock {
+		return abci.ResponseEndBlock{
+			ValidatorUpdates: []abci.ValidatorUpdate{
+				{Power: 100},
+			},
+		}
+	})
+	app.Seal()
+
+	res := app.EndBlock(abci.RequestEndBlock{})
+	require.Len(t, res.GetValidatorUpdates(), 1)
+	require.Equal(t, int64(100), res.GetValidatorUpdates()[0].Power)
+	require.Equal(t, cp.Block.MaxGas, res.ConsensusParamUpdates.Block.MaxGas)
+}
+
+// Test that txs can be unmarshalled and read and that
+// correct error codes are returned when not
+func TestTxDecoder(t *testing.T) {
+	cdc := codec.NewProtoCodec(codectypes.NewInterfaceRegistry())
+	baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry())
+
+	// patch in TxConfig instead of using an output from x/auth/tx
+	txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes)
+
+	tx := newTxCounter(txConfig, 1, 0)
+	txBytes, err := txConfig.TxEncoder()(tx)
+	require.NoError(t, err)
+
+	dTx,
err := txConfig.TxDecoder()(txBytes) + require.NoError(t, err) + + counter, _ := parseTxMemo(tx) + dTxCounter, _ := parseTxMemo(dTx) + require.Equal(t, counter, dTxCounter) +} + +// Tx processing - CheckTx, DeliverTx, SimulateTx. +// These tests use the serialized tx as input, while most others will use the +// Check(), Deliver(), Simulate() methods directly. +// Ensure that Check/Deliver/Simulate work as expected with the store. + +// Test that successive CheckTx can see each others' effects +// on the store within a block, and that the CheckTx state +// gets reset to the latest committed state during Commit +func TestCheckTx(t *testing.T) { + // This ante handler reads the key and checks that the value matches the current counter. + // This ensures changes to the kvstore persist across successive CheckTx. + counterKey := []byte("counter-key") + + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, counterKey)) } + + // Setup baseapp. + var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImpl{t, capKey1, counterKey}) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + nTxs := int64(5) + app.InitChain(abci.RequestInitChain{}) + + for i := int64(0); i < nTxs; i++ { + tx := newTxCounter(txConfig, i, 0) // no messages + txBytes, err := txConfig.TxEncoder()(tx) + require.NoError(t, err) + + require.NoError(t, err) + r := app.CheckTx(abci.RequestCheckTx{Tx: txBytes}) + require.Equal(t, testTxPriority, r.Priority) + require.Empty(t, r.GetEvents()) + require.True(t, r.IsOK(), fmt.Sprintf("%v", r)) + } + + checkStateStore := getCheckStateCtx(app.BaseApp).KVStore(capKey1) + storedCounter := getIntFromStore(checkStateStore, counterKey) + + // Ensure AnteHandler ran + require.Equal(t, nTxs, storedCounter) + + // If a block is committed, CheckTx state should be reset. + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header, Hash: []byte("hash")}) + + require.NotNil(t, getCheckStateCtx(app.BaseApp).BlockGasMeter(), "block gas meter should have been set to checkState") + require.NotEmpty(t, getCheckStateCtx(app.BaseApp).HeaderHash()) + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + + checkStateStore = getCheckStateCtx(app.BaseApp).KVStore(capKey1) + storedBytes := checkStateStore.Get(counterKey) + require.Nil(t, storedBytes) +} + +// Test that successive DeliverTx can see each others' effects +// on the store, both within and across blocks. +func TestDeliverTx(t *testing.T) { + // test increments in the ante + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // Setup baseapp. 
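+	// The app is wired via depinject: makeMinimalConfig supplies the app config,
+	// depinject.Inject resolves the runtime AppBuilder and codec, and
+	// appBuilder.Build constructs the BaseApp with the ante option above. A
+	// TxConfig is then patched in directly so txs can be encoded and decoded
+	// without the full x/auth/tx wiring.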
+ var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + app.InitChain(abci.RequestInitChain{}) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + nBlocks := 3 + txPerHeight := 5 + + for blockN := 0; blockN < nBlocks; blockN++ { + header := tmproto.Header{Height: int64(blockN) + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + for i := 0; i < txPerHeight; i++ { + counter := int64(blockN*txPerHeight + i) + tx := newTxCounter(txConfig, counter, counter) + txBytes, err := txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + events := res.GetEvents() + require.Len(t, events, 3, "should contain ante handler, message type and counter events respectively") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent("ante_handler", counter).ToABCIEvents(), map[string]struct{}{})[0], events[0], "ante handler event") + require.Equal(t, sdk.MarkEventsToIndex(counterEvent(sdk.EventTypeMessage, counter).ToABCIEvents(), map[string]struct{}{})[0], events[2], "msg handler update counter event") + } + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +// One call to DeliverTx should process all the messages, in order. +func TestMultiMsgDeliverTx(t *testing.T) { + // test increments in the ante + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) } + + // Setup baseapp. 
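+	// This test registers both the Counter and Counter2 Msg servers so a single
+	// tx can carry messages with different routes; the ante counter should
+	// increment once per tx while the per-message counters track each msg.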
+ var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + app.InitChain(abci.RequestInitChain{}) + + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + deliverKey2 := []byte("deliver-key2") + baseapptestutil.RegisterCounter2Server(app.MsgServiceRouter(), Counter2ServerImpl{t, capKey1, deliverKey2}) + + // run a multi-msg tx + // with all msgs the same route + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + tx := newTxCounter(txConfig, 0, 0, 1, 2) + txBytes, err := txConfig.TxEncoder()(tx) + require.NoError(t, err) + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + + store := getDeliverStateCtx(app.BaseApp).KVStore(capKey1) + + // tx counter only incremented once + txCounter := getIntFromStore(store, anteKey) + require.Equal(t, int64(1), txCounter) + + // msg counter incremented three times + msgCounter := getIntFromStore(store, deliverKey) + require.Equal(t, int64(3), msgCounter) + + // replace the second message with a Counter2 + tx = newTxCounter(txConfig, 1, 3) + + builder := txConfig.NewTxBuilder() + msgs := tx.GetMsgs() + msgs = append(msgs, &baseapptestutil.MsgCounter2{Counter: 0}) + msgs = append(msgs, &baseapptestutil.MsgCounter2{Counter: 1}) + + builder.SetMsgs(msgs...) + builder.SetMemo(tx.GetMemo()) + + txBytes, err = txConfig.TxEncoder()(builder.GetTx()) + require.NoError(t, err) + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + + store = getDeliverStateCtx(app.BaseApp).KVStore(capKey1) + + // tx counter only incremented once + txCounter = getIntFromStore(store, anteKey) + require.Equal(t, int64(2), txCounter) + + // original counter increments by one + // new counter increments by two + msgCounter = getIntFromStore(store, deliverKey) + require.Equal(t, int64(4), msgCounter) + msgCounter2 := getIntFromStore(store, deliverKey2) + require.Equal(t, int64(2), msgCounter2) +} + +// Simulate a transaction that uses gas to compute the gas. +// Simulate() and Query("/app/simulate", txBytes) should give +// the same results. +func TestSimulateTx(t *testing.T) { + gasConsumed := uint64(5) + + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasConsumed)) + return + }) + } + + // Setup baseapp. 
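+	// The ante handler above installs a gas meter capped at gasConsumed, and the
+	// CounterServerImplGasMeterOnly handler registered below (as its name
+	// suggests) only consumes gas, so Simulate() and the "/app/simulate" query
+	// are expected to report the same gas usage as each other.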
+ var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + app.InitChain(abci.RequestInitChain{}) + + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImplGasMeterOnly{gasConsumed}) + + nBlocks := 3 + for blockN := 0; blockN < nBlocks; blockN++ { + count := int64(blockN + 1) + header := tmproto.Header{Height: count} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + tx := newTxCounter(txConfig, count, count) + + txBytes, err := txConfig.TxEncoder()(tx) + require.Nil(t, err) + + // simulate a message, check gas reported + gInfo, result, err := app.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate again, same result + gInfo, result, err = app.Simulate(txBytes) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, gasConsumed, gInfo.GasUsed) + + // simulate by calling Query with encoded tx + query := abci.RequestQuery{ + Path: "/app/simulate", + Data: txBytes, + } + queryResult := app.Query(query) + require.True(t, queryResult.IsOK(), queryResult.Log) + + var simRes sdk.SimulationResponse + require.NoError(t, jsonpb.Unmarshal(strings.NewReader(string(queryResult.Value)), &simRes)) + + require.Equal(t, gInfo, simRes.GasInfo) + require.Equal(t, result.Log, simRes.Result.Log) + require.Equal(t, result.Events, simRes.Result.Events) + require.True(t, bytes.Equal(result.Data, simRes.Result.Data)) + + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() + } +} + +func TestRunInvalidTransaction(t *testing.T) { + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + return + }) + } + + // Setup baseapp. 
+ var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + app.InitChain(abci.RequestInitChain{}) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // transaction with no messages + { + emptyTx := txConfig.NewTxBuilder().GetTx() + _, result, err := app.SimDeliver(txConfig.TxEncoder(), emptyTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidRequest.ABCICode(), code, err) + } + + // transaction where ValidateBasic fails + { + testCases := []struct { + tx signing.Tx + fail bool + }{ + {newTxCounter(txConfig, 0, 0), false}, + {newTxCounter(txConfig, -1, 0), false}, + {newTxCounter(txConfig, 100, 100), false}, + {newTxCounter(txConfig, 100, 5, 4, 3, 2, 1), false}, + + {newTxCounter(txConfig, 0, -1), true}, + {newTxCounter(txConfig, 0, 1, -2), true}, + {newTxCounter(txConfig, 0, 1, 2, -10, 5), true}, + } + + for _, testCase := range testCases { + tx := testCase.tx + _, result, err := app.SimDeliver(txConfig.TxEncoder(), tx) + + if testCase.fail { + require.Error(t, err) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrInvalidSequence.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrInvalidSequence.ABCICode(), code, err) + } else { + require.NotNil(t, result) + } + } + } + + // transaction with no known route + { + txBuilder := txConfig.NewTxBuilder() + txBuilder.SetMsgs(&baseapptestutil.MsgCounter2{}) + unknownRouteTx := txBuilder.GetTx() + + _, result, err := app.SimDeliver(txConfig.TxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + + txBuilder = txConfig.NewTxBuilder() + txBuilder.SetMsgs(&baseapptestutil.MsgCounter{}, &baseapptestutil.MsgCounter2{}) + unknownRouteTx = txBuilder.GetTx() + _, result, err = app.SimDeliver(txConfig.TxEncoder(), unknownRouteTx) + require.Error(t, err) + require.Nil(t, result) + + space, code, _ = sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrUnknownRequest.ABCICode(), code, err) + } + + // Transaction with an unregistered message + { + txBuilder := txConfig.NewTxBuilder() + txBuilder.SetMsgs(&testdata.MsgCreateDog{}) + tx := txBuilder.GetTx() + + txBytes, err := txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.EqualValues(t, sdkerrors.ErrTxDecode.ABCICode(), 
res.Code) + require.EqualValues(t, sdkerrors.ErrTxDecode.Codespace(), res.Codespace) + } +} + +// Test that transactions exceeding gas limits fail +func TestTxGasLimits(t *testing.T) { + gasGranted := uint64(10) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted)) + + // AnteHandlers must have their own defer/recover in order for the BaseApp + // to know how much gas was used! This is because the GasMeter is created in + // the AnteHandler, but if it panics the context won't be set properly in + // runTx's recover call. + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor) + default: + panic(r) + } + } + }() + + count, _ := parseTxMemo(tx) + newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante") + + return newCtx, nil + }) + } + + // Setup baseapp. + var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImplGasMeterOnly{}) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + app.InitChain(abci.RequestInitChain{}) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + testCases := []struct { + tx signing.Tx + gasUsed uint64 + fail bool + }{ + {newTxCounter(txConfig, 0, 0), 0, false}, + {newTxCounter(txConfig, 1, 1), 2, false}, + {newTxCounter(txConfig, 9, 1), 10, false}, + {newTxCounter(txConfig, 1, 9), 10, false}, + {newTxCounter(txConfig, 10, 0), 10, false}, + {newTxCounter(txConfig, 0, 10), 10, false}, + {newTxCounter(txConfig, 0, 8, 2), 10, false}, + {newTxCounter(txConfig, 0, 5, 1, 1, 1, 1, 1), 10, false}, + {newTxCounter(txConfig, 0, 5, 1, 1, 1, 1), 9, false}, + + {newTxCounter(txConfig, 9, 2), 11, true}, + {newTxCounter(txConfig, 2, 9), 11, true}, + {newTxCounter(txConfig, 9, 1, 1), 11, true}, + {newTxCounter(txConfig, 1, 8, 1, 1), 11, true}, + {newTxCounter(txConfig, 11, 0), 11, true}, + {newTxCounter(txConfig, 0, 11), 11, true}, + {newTxCounter(txConfig, 0, 5, 11), 16, true}, + } + + for i, tc := range testCases { + tx := tc.tx + gInfo, result, err := app.SimDeliver(txConfig.TxEncoder(), tx) + + // check gas used and wanted + require.Equal(t, tc.gasUsed, gInfo.GasUsed, fmt.Sprintf("tc #%d; gas: %v, result: %v, err: %s", i, gInfo, result, err)) + + // check for out of gas + if !tc.fail { + require.NotNil(t, result, fmt.Sprintf("%d: %v, %v", i, tc, err)) + } else { + require.Error(t, err) + require.Nil(t, result) + + space, code, _ := sdkerrors.ABCIInfo(err, false) + require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err) + require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err) + } + } +} + +// Test that transactions exceeding gas limits fail +func 
TestMaxBlockGasLimits(t *testing.T) {
+	gasGranted := uint64(10)
+	anteOpt := func(bapp *baseapp.BaseApp) {
+		bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) {
+			newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasGranted))
+
+			defer func() {
+				if r := recover(); r != nil {
+					switch rType := r.(type) {
+					case sdk.ErrorOutOfGas:
+						err = sdkerrors.Wrapf(sdkerrors.ErrOutOfGas, "out of gas in location: %v", rType.Descriptor)
+					default:
+						panic(r)
+					}
+				}
+			}()
+
+			count, _ := parseTxMemo(tx)
+			newCtx.GasMeter().ConsumeGas(uint64(count), "counter-ante")
+
+			return
+		})
+	}
+
+	// Setup baseapp.
+	var (
+		appBuilder *runtime.AppBuilder
+		cdc        codec.ProtoCodecMarshaler
+	)
+	err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc)
+	require.NoError(t, err)
+
+	testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test"))
+
+	app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt)
+	app.SetCMS(testCtx.CMS)
+
+	baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry())
+	baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImplGasMeterOnly{})
+
+	// patch in TxConfig instead of using an output from x/auth/tx
+	txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes)
+	// set the TxDecoder in the BaseApp for minimal tx simulations
+	app.SetTxDecoder(txConfig.TxDecoder())
+	app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
+
+	app.InitChain(abci.RequestInitChain{
+		ConsensusParams: &abci.ConsensusParams{
+			Block: &abci.BlockParams{
+				MaxGas: 100,
+			},
+		},
+	})
+
+	header := tmproto.Header{Height: 1}
+	app.BeginBlock(abci.RequestBeginBlock{Header: header})
+
+	testCases := []struct {
+		tx                signing.Tx
+		numDelivers       int
+		gasUsedPerDeliver uint64
+		fail              bool
+		failAfterDeliver  int
+	}{
+		{newTxCounter(txConfig, 0, 0), 0, 0, false, 0},
+		{newTxCounter(txConfig, 9, 1), 2, 10, false, 0},
+		{newTxCounter(txConfig, 10, 0), 3, 10, false, 0},
+		{newTxCounter(txConfig, 10, 0), 10, 10, false, 0},
+		{newTxCounter(txConfig, 2, 7), 11, 9, false, 0},
+		{newTxCounter(txConfig, 10, 0), 10, 10, false, 0}, // hit the limit but pass
+
+		{newTxCounter(txConfig, 10, 0), 11, 10, true, 10},
+		{newTxCounter(txConfig, 10, 0), 15, 10, true, 10},
+		{newTxCounter(txConfig, 9, 0), 12, 9, true, 11}, // fly past the limit
+	}
+
+	for i, tc := range testCases {
+		tx := tc.tx
+
+		// reset the block gas
+		header := tmproto.Header{Height: app.LastBlockHeight() + 1}
+		app.BeginBlock(abci.RequestBeginBlock{Header: header})
+
+		// execute the transaction multiple times
+		for j := 0; j < tc.numDelivers; j++ {
+			_, result, err := app.SimDeliver(txConfig.TxEncoder(), tx)
+
+			ctx := getDeliverStateCtx(app.BaseApp)
+
+			// check for failed transactions
+			if tc.fail && (j+1) > tc.failAfterDeliver {
+				require.Error(t, err, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err))
+				require.Nil(t, result, fmt.Sprintf("tc #%d; result: %v, err: %s", i, result, err))
+
+				space, code, _ := sdkerrors.ABCIInfo(err, false)
+				require.EqualValues(t, sdkerrors.ErrOutOfGas.Codespace(), space, err)
+				require.EqualValues(t, sdkerrors.ErrOutOfGas.ABCICode(), code, err)
+				require.True(t, ctx.BlockGasMeter().IsOutOfGas())
+			} else {
+				// check gas used and wanted
+				blockGasUsed := ctx.BlockGasMeter().GasConsumed()
+				expBlockGasUsed := tc.gasUsedPerDeliver * uint64(j+1)
+				require.Equal(
+					t, expBlockGasUsed, blockGasUsed,
+					fmt.Sprintf("%d,%d: %v, %v, %v, %v", i, j, tc, expBlockGasUsed, blockGasUsed, result),
+				)
+
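+				// the delivery itself succeeded and the block gas meter should still be within its limit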
+ require.NotNil(t, result, fmt.Sprintf("tc #%d; currDeliver: %d, result: %v, err: %s", i, j, result, err)) + require.False(t, ctx.BlockGasMeter().IsPastLimit()) + } + } + } +} + +// Test custom panic handling within app.DeliverTx method +func TestCustomRunTxPanicHandler(t *testing.T) { + const customPanicMsg = "test panic" + anteErr := sdkerrors.Register("fakeModule", 100500, "fakeError") + + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + panic(sdkerrors.Wrap(anteErr, "anteHandler")) + }) + } + + // Setup baseapp. + var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + app.InitChain(abci.RequestInitChain{}) + + header := tmproto.Header{Height: 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + app.AddRunTxRecoveryHandler(func(recoveryObj interface{}) error { + err, ok := recoveryObj.(error) + if !ok { + return nil + } + + if anteErr.Is(err) { + panic(customPanicMsg) + } else { + return nil + } + }) + + // Transaction should panic with custom handler above + { + tx := newTxCounter(txConfig, 0, 0) + + require.PanicsWithValue(t, customPanicMsg, func() { app.SimDeliver(txConfig.TxEncoder(), tx) }) + } +} + +func TestBaseAppAnteHandler(t *testing.T) { + anteKey := []byte("ante-key") + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(anteHandlerTxTest(t, capKey1, anteKey)) + } + + // Setup baseapp. + var ( + appBuilder *runtime.AppBuilder + cdc codec.ProtoCodecMarshaler + ) + err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc) + require.NoError(t, err) + + testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test")) + + app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt) + app.SetCMS(testCtx.CMS) + baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry()) + deliverKey := []byte("deliver-key") + baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImpl{t, capKey1, deliverKey}) + + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + // patch in TxConfig instead of using an output from x/auth/tx + txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes) + // set the TxDecoder in the BaseApp for minimal tx simulations + app.SetTxDecoder(txConfig.TxDecoder()) + + // execute a tx that will fail ante handler execution + // + // NOTE: State should not be mutated here. This will be implicitly checked by + // the next txs ante handler execution (anteHandlerTxTest). 
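+	// setFailOnAnte re-encodes the tx memo with failOnAnte=true, which
+	// anteHandlerTxTest reads via parseTxMemo and rejects before touching the store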
+ tx := newTxCounter(txConfig, 0, 0) + tx = setFailOnAnte(txConfig, tx, true) + txBytes, err := txConfig.TxEncoder()(tx) + require.NoError(t, err) + res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.Empty(t, res.Events) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) + + ctx := getDeliverStateCtx(app.BaseApp) + store := ctx.KVStore(capKey1) + require.Equal(t, int64(0), getIntFromStore(store, anteKey)) + + // execute at tx that will pass the ante handler (the checkTx state should + // mutate) but will fail the message handler + tx = newTxCounter(txConfig, 0, 0) + tx = setFailOnHandler(txConfig, tx, true) + + txBytes, err = txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + // should emit ante event + require.NotEmpty(t, res.Events) + require.False(t, res.IsOK(), fmt.Sprintf("%v", res)) + + ctx = getDeliverStateCtx(app.BaseApp) + store = ctx.KVStore(capKey1) + require.Equal(t, int64(1), getIntFromStore(store, anteKey)) + require.Equal(t, int64(0), getIntFromStore(store, deliverKey)) + + // execute a successful ante handler and message execution where state is + // implicitly checked by previous tx executions + tx = newTxCounter(txConfig, 1, 0) + + txBytes, err = txConfig.TxEncoder()(tx) + require.NoError(t, err) + + res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes}) + require.NotEmpty(t, res.Events) + require.True(t, res.IsOK(), fmt.Sprintf("%v", res)) + + ctx = getDeliverStateCtx(app.BaseApp) + store = ctx.KVStore(capKey1) + require.Equal(t, int64(2), getIntFromStore(store, anteKey)) + require.Equal(t, int64(1), getIntFromStore(store, deliverKey)) + + // commit + app.EndBlock(abci.RequestEndBlock{}) + app.Commit() +} + +func TestGasConsumptionBadTx(t *testing.T) { + gasWanted := uint64(5) + anteOpt := func(bapp *baseapp.BaseApp) { + bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) { + newCtx = ctx.WithGasMeter(sdk.NewGasMeter(gasWanted)) + + defer func() { + if r := recover(); r != nil { + switch rType := r.(type) { + case sdk.ErrorOutOfGas: + log := fmt.Sprintf("out of gas in location: %v", rType.Descriptor) + err = sdkerrors.Wrap(sdkerrors.ErrOutOfGas, log) + default: + panic(r) + } + } + }() + + counter, failOnAnte := parseTxMemo(tx) + newCtx.GasMeter().ConsumeGas(uint64(counter), "counter-ante") + if failOnAnte { + return newCtx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + return + }) + } + + // Setup baseapp. 
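+	// a param store is set below so InitChain can persist the block MaxGas consensus parameter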
+	var (
+		appBuilder *runtime.AppBuilder
+		cdc        codec.ProtoCodecMarshaler
+	)
+	err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc)
+	require.NoError(t, err)
+
+	testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test"))
+
+	app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt)
+	app.SetCMS(testCtx.CMS)
+
+	baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry())
+	baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImplGasMeterOnly{})
+
+	// patch in TxConfig instead of using an output from x/auth/tx
+	txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes)
+	// set the TxDecoder in the BaseApp for minimal tx simulations
+	app.SetTxDecoder(txConfig.TxDecoder())
+	app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
+
+	app.InitChain(abci.RequestInitChain{
+		ConsensusParams: &abci.ConsensusParams{
+			Block: &abci.BlockParams{
+				MaxGas: 9,
+			},
+		},
+	})
+
+	app.InitChain(abci.RequestInitChain{})
+
+	header := tmproto.Header{Height: app.LastBlockHeight() + 1}
+	app.BeginBlock(abci.RequestBeginBlock{Header: header})
+
+	tx := newTxCounter(txConfig, 5, 0)
+	tx = setFailOnAnte(txConfig, tx, true)
+	txBytes, err := txConfig.TxEncoder()(tx)
+	require.NoError(t, err)
+
+	res := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+	require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
+
+	// require next tx to fail due to block gas limit
+	tx = newTxCounter(txConfig, 5, 0)
+	txBytes, err = txConfig.TxEncoder()(tx)
+	require.NoError(t, err)
+
+	res = app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
+	require.False(t, res.IsOK(), fmt.Sprintf("%v", res))
+}
+
+// Test that we can only query from the latest committed state.
+func TestQuery(t *testing.T) {
+	key, value := []byte("hello"), []byte("goodbye")
+	anteOpt := func(bapp *baseapp.BaseApp) {
+		bapp.SetAnteHandler(func(ctx sdk.Context, tx sdk.Tx, simulate bool) (newCtx sdk.Context, err error) {
+			store := ctx.KVStore(capKey1)
+			store.Set(key, value)
+			return
+		})
+	}
+
+	// Setup baseapp.
+	var (
+		appBuilder *runtime.AppBuilder
+		cdc        codec.ProtoCodecMarshaler
+	)
+	err := depinject.Inject(makeMinimalConfig(), &appBuilder, &cdc)
+	require.NoError(t, err)
+
+	testCtx := testutil.DefaultContextWithDB(t, capKey1, sdk.NewTransientStoreKey("transient_test"))
+
+	app := appBuilder.Build(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), testCtx.DB, nil, anteOpt)
+	app.SetCMS(testCtx.CMS)
+
+	baseapptestutil.RegisterInterfaces(cdc.InterfaceRegistry())
+	baseapptestutil.RegisterCounterServer(app.MsgServiceRouter(), CounterServerImplGasMeterOnly{})
+
+	// patch in TxConfig instead of using an output from x/auth/tx
+	txConfig := authtx.NewTxConfig(cdc, authtx.DefaultSignModes)
+	// set the TxDecoder in the BaseApp for minimal tx simulations
+	app.SetTxDecoder(txConfig.TxDecoder())
+	app.SetParamStore(&paramStore{db: dbm.NewMemDB()})
+
+	app.InitChain(abci.RequestInitChain{})
+
+	// NOTE: "/store/key1" tells us KVStore
+	// and the final "/key" says to use the data as the
+	// key in the given KVStore ...
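+	// e.g. Query{Path: "/store/key1/key", Data: []byte("hello")} asks the "key1"
+	// KVStore for the value stored under the raw key "hello"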
+ query := abci.RequestQuery{ + Path: "/store/key1/key", + Data: key, + } + tx := newTxCounter(txConfig, 0, 0) + + // query is empty before we do anything + res := app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a CheckTx + _, resTx, err := app.SimCheck(txConfig.TxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + res = app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query is still empty after a DeliverTx before we commit + header := tmproto.Header{Height: app.LastBlockHeight() + 1} + app.BeginBlock(abci.RequestBeginBlock{Header: header}) + + _, resTx, err = app.SimDeliver(txConfig.TxEncoder(), tx) + require.NoError(t, err) + require.NotNil(t, resTx) + res = app.Query(query) + require.Equal(t, 0, len(res.Value)) + + // query returns correct value after Commit + app.Commit() + res = app.Query(query) + require.Equal(t, value, res.Value) +} + +func getCheckStateCtx(app *baseapp.BaseApp) sdk.Context { + v := reflect.ValueOf(app).Elem() + f := v.FieldByName("checkState") + rf := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem() + return rf.MethodByName("Context").Call(nil)[0].Interface().(sdk.Context) +} + +func getDeliverStateCtx(app *baseapp.BaseApp) sdk.Context { + v := reflect.ValueOf(app).Elem() + f := v.FieldByName("deliverState") + rf := reflect.NewAt(f.Type(), unsafe.Pointer(f.UnsafeAddr())).Elem() + return rf.MethodByName("Context").Call(nil)[0].Interface().(sdk.Context) +} + +func parseTxMemo(tx sdk.Tx) (counter int64, failOnAnte bool) { + txWithMemo, ok := tx.(sdk.TxWithMemo) + if !ok { + panic("not a sdk.TxWithMemo") + } + memo := txWithMemo.GetMemo() + vals, err := url.ParseQuery(memo) + if err != nil { + panic("invalid memo") + } + + counter, err = strconv.ParseInt(vals.Get("counter"), 10, 64) + if err != nil { + panic("invalid counter") + } + + failOnAnte = vals.Get("failOnAnte") == "true" + + return counter, failOnAnte +} + +func counterEvent(evType string, msgCount int64) sdk.Events { + return sdk.Events{ + sdk.NewEvent( + evType, + sdk.NewAttribute("update_counter", fmt.Sprintf("%d", msgCount)), + ), + } +} + +func newTxCounter(cfg client.TxConfig, counter int64, msgCounters ...int64) signing.Tx { + msgs := make([]sdk.Msg, 0, len(msgCounters)) + for _, c := range msgCounters { + msg := &baseapptestutil.MsgCounter{Counter: c, FailOnHandler: false} + msgs = append(msgs, msg) + } + + builder := cfg.NewTxBuilder() + builder.SetMsgs(msgs...) + builder.SetMemo("counter=" + strconv.FormatInt(counter, 10) + "&failOnAnte=false") + + return builder.GetTx() +} + +func setFailOnAnte(cfg client.TxConfig, tx signing.Tx, failOnAnte bool) signing.Tx { + builder := cfg.NewTxBuilder() + builder.SetMsgs(tx.GetMsgs()...) + + memo := tx.GetMemo() + vals, err := url.ParseQuery(memo) + if err != nil { + panic("invalid memo") + } + + vals.Set("failOnAnte", strconv.FormatBool(failOnAnte)) + memo = vals.Encode() + builder.SetMemo(memo) + + return builder.GetTx() +} + +func setFailOnHandler(cfg client.TxConfig, tx signing.Tx, fail bool) signing.Tx { + builder := cfg.NewTxBuilder() + builder.SetMemo(tx.GetMemo()) + + msgs := tx.GetMsgs() + for i, msg := range msgs { + msgs[i] = &baseapptestutil.MsgCounter{ + Counter: msg.(*baseapptestutil.MsgCounter).Counter, + FailOnHandler: fail, + } + } + + builder.SetMsgs(msgs...) 
+ return builder.GetTx() +} + +func anteHandlerTxTest(t *testing.T, capKey storetypes.StoreKey, storeKey []byte) sdk.AnteHandler { + return func(ctx sdk.Context, tx sdk.Tx, simulate bool) (sdk.Context, error) { + store := ctx.KVStore(capKey) + counter, failOnAnte := parseTxMemo(tx) + + if failOnAnte { + return ctx, sdkerrors.Wrap(sdkerrors.ErrUnauthorized, "ante handler failure") + } + + _, err := incrementingCounter(t, store, storeKey, counter) + if err != nil { + return ctx, err + } + + ctx.EventManager().EmitEvents( + counterEvent("ante_handler", counter), + ) + + ctx = ctx.WithPriority(testTxPriority) + + return ctx, nil + } +} + +// check counter matches what's in store. +// increment and store +func incrementingCounter(t *testing.T, store sdk.KVStore, counterKey []byte, counter int64) (*sdk.Result, error) { + storedCounter := getIntFromStore(store, counterKey) + require.Equal(t, storedCounter, counter) + setIntOnStore(store, counterKey, counter+1) + return &sdk.Result{}, nil +} + +func getIntFromStore(store sdk.KVStore, key []byte) int64 { + bz := store.Get(key) + if len(bz) == 0 { + return 0 + } + i, err := binary.ReadVarint(bytes.NewBuffer(bz)) + if err != nil { + panic(err) + } + return i +} + +func setIntOnStore(store sdk.KVStore, key []byte, i int64) { + bz := make([]byte, 8) + n := binary.PutVarint(bz, i) + store.Set(key, bz[:n]) +} + +type MsgKeyValueImpl struct{} + +func (m MsgKeyValueImpl) Set(ctx context.Context, msg *baseapptestutil.MsgKeyValue) (*baseapptestutil.MsgCreateKeyValueResponse, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + sdkCtx.KVStore(capKey2).Set(msg.Key, msg.Value) + return &baseapptestutil.MsgCreateKeyValueResponse{}, nil +} + +type CounterServerImpl struct { + t *testing.T + capKey storetypes.StoreKey + deliverKey []byte +} + +func (m CounterServerImpl) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { + return incrementCounter(ctx, m.t, m.capKey, m.deliverKey, msg) +} + +type Counter2ServerImpl struct { + t *testing.T + capKey storetypes.StoreKey + deliverKey []byte +} + +func (m Counter2ServerImpl) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter2) (*baseapptestutil.MsgCreateCounterResponse, error) { + return incrementCounter(ctx, m.t, m.capKey, m.deliverKey, msg) +} + +func incrementCounter(ctx context.Context, + t *testing.T, + capKey storetypes.StoreKey, + deliverKey []byte, + msg sdk.Msg, +) (*baseapptestutil.MsgCreateCounterResponse, error) { + sdkCtx := sdk.UnwrapSDKContext(ctx) + store := sdkCtx.KVStore(capKey) + + sdkCtx.GasMeter().ConsumeGas(5, "test") + + var msgCount int64 + + switch m := msg.(type) { + case *baseapptestutil.MsgCounter: + if m.FailOnHandler { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") + } + msgCount = m.Counter + case *baseapptestutil.MsgCounter2: + if m.FailOnHandler { + return nil, sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "message handler failure") + } + msgCount = m.Counter + } + + sdkCtx.EventManager().EmitEvents( + counterEvent(sdk.EventTypeMessage, msgCount), + ) + + _, err := incrementingCounter(t, store, deliverKey, msgCount) + if err != nil { + return nil, err + } + + return &baseapptestutil.MsgCreateCounterResponse{}, nil +} + +type CounterServerImplGasMeterOnly struct { + gas uint64 +} + +func (m CounterServerImplGasMeterOnly) IncrementCounter(ctx context.Context, msg *baseapptestutil.MsgCounter) (*baseapptestutil.MsgCreateCounterResponse, error) { + sdkCtx := 
sdk.UnwrapSDKContext(ctx) + gas := m.gas + + // if no gas is provided, use the counter as gas. This is useful for testing + if gas == 0 { + gas = uint64(msg.Counter) + } + sdkCtx.GasMeter().ConsumeGas(gas, "test") + return &baseapptestutil.MsgCreateCounterResponse{}, nil +} + +type paramStore struct { + db *dbm.MemDB +} + +func (ps *paramStore) Set(_ sdk.Context, key []byte, value interface{}) { + bz, err := json.Marshal(value) + if err != nil { + panic(err) + } + + ps.db.Set(key, bz) +} + +func (ps *paramStore) Has(_ sdk.Context, key []byte) bool { + ok, err := ps.db.Has(key) + if err != nil { + panic(err) + } + + return ok +} + +func (ps *paramStore) Get(_ sdk.Context, key []byte, ptr interface{}) { + bz, err := ps.db.Get(key) + if err != nil { + panic(err) + } + + if len(bz) == 0 { + return + } + + if err := json.Unmarshal(bz, ptr); err != nil { + panic(err) + } +} diff --git a/baseapp/options.go b/baseapp/options.go index 57abd2b19960..a9377f223f84 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -200,14 +200,6 @@ func (app *BaseApp) SetStoreLoader(loader StoreLoader) { app.storeLoader = loader } -// SetRouter allows us to customize the router. -func (app *BaseApp) SetRouter(router sdk.Router) { - if app.sealed { - panic("SetRouter() on sealed BaseApp") - } - app.router = router -} - // SetSnapshot sets the snapshot store and options. func (app *BaseApp) SetSnapshot(snapshotStore *snapshots.Store, opts snapshottypes.SnapshotOptions) { if app.sealed { diff --git a/baseapp/router.go b/baseapp/router.go deleted file mode 100644 index 7e2e70a0c6f3..000000000000 --- a/baseapp/router.go +++ /dev/null @@ -1,41 +0,0 @@ -package baseapp - -import ( - "fmt" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type Router struct { - routes map[string]sdk.Handler -} - -var _ sdk.Router = NewRouter() - -// NewRouter returns a reference to a new router. -func NewRouter() *Router { - return &Router{ - routes: make(map[string]sdk.Handler), - } -} - -// AddRoute adds a route path to the router with a given handler. The route must -// be alphanumeric. -func (rtr *Router) AddRoute(route sdk.Route) sdk.Router { - if !sdk.IsAlphaNumeric(route.Path()) { - panic("route expressions can only contain alphanumeric characters") - } - if rtr.routes[route.Path()] != nil { - panic(fmt.Sprintf("route %s has already been initialized", route.Path())) - } - - rtr.routes[route.Path()] = route.Handler() - return rtr -} - -// Route returns a handler for a given route path. -// -// TODO: Handle expressive matches. 
-func (rtr *Router) Route(_ sdk.Context, path string) sdk.Handler { - return rtr.routes[path] -} diff --git a/baseapp/router_test.go b/baseapp/router_test.go deleted file mode 100644 index 1e11dc0ca089..000000000000 --- a/baseapp/router_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package baseapp - -import ( - "testing" - - "github.com/stretchr/testify/require" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var testHandler = func(_ sdk.Context, _ sdk.Msg) (*sdk.Result, error) { - return &sdk.Result{}, nil -} - -func TestRouter(t *testing.T) { - rtr := NewRouter() - - // require panic on invalid route - require.Panics(t, func() { - rtr.AddRoute(sdk.NewRoute("*", testHandler)) - }) - - rtr.AddRoute(sdk.NewRoute("testRoute", testHandler)) - h := rtr.Route(sdk.Context{}, "testRoute") - require.NotNil(t, h) - - // require panic on duplicate route - require.Panics(t, func() { - rtr.AddRoute(sdk.NewRoute("testRoute", testHandler)) - }) -} diff --git a/baseapp/testutil/buf.gen.yaml b/baseapp/testutil/buf.gen.yaml new file mode 100644 index 000000000000..d7d17bbb26f8 --- /dev/null +++ b/baseapp/testutil/buf.gen.yaml @@ -0,0 +1,5 @@ +version: v1 +plugins: + - name: gocosmos + out: ../.. + opt: plugins=grpc,Mgoogle/protobuf/any.proto=github.com/cosmos/cosmos-sdk/codec/types diff --git a/baseapp/testutil/buf.lock b/baseapp/testutil/buf.lock new file mode 100644 index 000000000000..c6f890bd4b3e --- /dev/null +++ b/baseapp/testutil/buf.lock @@ -0,0 +1,17 @@ +# Generated by buf. DO NOT EDIT. +version: v1 +deps: + - remote: buf.build + owner: cosmos + repository: cosmos-proto + branch: main + commit: 9e9a53f8db0d493f8b8c66d458c767c1 + digest: b1-6w7Hozd_Oo_yZ1Sku8Nhz9qou-4licLr6VmEyeI9jO4= + create_time: 2021-12-02T20:41:47.795828Z + - remote: buf.build + owner: cosmos + repository: gogo-proto + branch: main + commit: bee5511075b7499da6178d9e4aaa628b + digest: b1-rrBIustouD-S80cVoZ_rM0qJsmei9AgbXy9GPQu6vxg= + create_time: 2021-12-02T20:01:17.069307Z diff --git a/baseapp/testutil/buf.yaml b/baseapp/testutil/buf.yaml new file mode 100644 index 000000000000..e6f82c0cdcd7 --- /dev/null +++ b/baseapp/testutil/buf.yaml @@ -0,0 +1,4 @@ +version: v1 +deps: + - buf.build/cosmos/gogo-proto + - buf.build/cosmos/cosmos-proto diff --git a/baseapp/testutil/messages.go b/baseapp/testutil/messages.go new file mode 100644 index 000000000000..54f72f30d77b --- /dev/null +++ b/baseapp/testutil/messages.go @@ -0,0 +1,56 @@ +package testutil + +import ( + "github.com/cosmos/cosmos-sdk/codec/types" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +func RegisterInterfaces(registry types.InterfaceRegistry) { + registry.RegisterImplementations( + (*sdk.Msg)(nil), + &MsgCounter{}, + &MsgCounter2{}, + &MsgKeyValue{}, + ) + msgservice.RegisterMsgServiceDesc(registry, &_Counter_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_Counter2_serviceDesc) + msgservice.RegisterMsgServiceDesc(registry, &_KeyValue_serviceDesc) +} + +var _ sdk.Msg = &MsgCounter{} + +func (msg *MsgCounter) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter) ValidateBasic() error { + if msg.Counter >= 0 { + return nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgCounter2{} + +func (msg *MsgCounter2) GetSigners() []sdk.AccAddress { return []sdk.AccAddress{} } +func (msg *MsgCounter2) ValidateBasic() error { + if msg.Counter >= 0 { + return 
nil + } + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "counter should be a non-negative integer") +} + +var _ sdk.Msg = &MsgKeyValue{} + +func (msg *MsgKeyValue) GetSigners() []sdk.AccAddress { + return []sdk.AccAddress{sdk.MustAccAddressFromBech32(msg.Signer)} +} + +func (msg *MsgKeyValue) ValidateBasic() error { + if msg.Key == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "key cannot be nil") + } + if msg.Value == nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidRequest, "value cannot be nil") + } + return nil +} diff --git a/baseapp/testutil/messages.pb.go b/baseapp/testutil/messages.pb.go new file mode 100644 index 000000000000..9a19f22013af --- /dev/null +++ b/baseapp/testutil/messages.pb.go @@ -0,0 +1,1293 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: messages.proto + +package testutil + +import ( + context "context" + fmt "fmt" + _ "github.com/cosmos/cosmos-sdk/codec/types" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +type MsgCounter struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` +} + +func (m *MsgCounter) Reset() { *m = MsgCounter{} } +func (m *MsgCounter) String() string { return proto.CompactTextString(m) } +func (*MsgCounter) ProtoMessage() {} +func (*MsgCounter) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{0} +} +func (m *MsgCounter) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter.Merge(m, src) +} +func (m *MsgCounter) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter proto.InternalMessageInfo + +func (m *MsgCounter) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +type MsgCounter2 struct { + Counter int64 `protobuf:"varint,1,opt,name=counter,proto3" json:"counter,omitempty"` + FailOnHandler bool `protobuf:"varint,2,opt,name=fail_on_handler,json=failOnHandler,proto3" json:"fail_on_handler,omitempty"` +} + +func (m *MsgCounter2) Reset() { *m = MsgCounter2{} } +func (m *MsgCounter2) String() string { return proto.CompactTextString(m) } +func (*MsgCounter2) ProtoMessage() {} +func (*MsgCounter2) 
Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{1} +} +func (m *MsgCounter2) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCounter2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCounter2.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCounter2) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCounter2.Merge(m, src) +} +func (m *MsgCounter2) XXX_Size() int { + return m.Size() +} +func (m *MsgCounter2) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCounter2.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCounter2 proto.InternalMessageInfo + +func (m *MsgCounter2) GetCounter() int64 { + if m != nil { + return m.Counter + } + return 0 +} + +func (m *MsgCounter2) GetFailOnHandler() bool { + if m != nil { + return m.FailOnHandler + } + return false +} + +type MsgCreateCounterResponse struct { +} + +func (m *MsgCreateCounterResponse) Reset() { *m = MsgCreateCounterResponse{} } +func (m *MsgCreateCounterResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateCounterResponse) ProtoMessage() {} +func (*MsgCreateCounterResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{2} +} +func (m *MsgCreateCounterResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateCounterResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateCounterResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateCounterResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateCounterResponse.Merge(m, src) +} +func (m *MsgCreateCounterResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateCounterResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateCounterResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateCounterResponse proto.InternalMessageInfo + +type MsgKeyValue struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Signer string `protobuf:"bytes,3,opt,name=signer,proto3" json:"signer,omitempty"` +} + +func (m *MsgKeyValue) Reset() { *m = MsgKeyValue{} } +func (m *MsgKeyValue) String() string { return proto.CompactTextString(m) } +func (*MsgKeyValue) ProtoMessage() {} +func (*MsgKeyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{3} +} +func (m *MsgKeyValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgKeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgKeyValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgKeyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgKeyValue.Merge(m, src) +} +func (m *MsgKeyValue) XXX_Size() int { + return m.Size() +} +func (m *MsgKeyValue) XXX_DiscardUnknown() { + xxx_messageInfo_MsgKeyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgKeyValue proto.InternalMessageInfo + +func (m *MsgKeyValue) GetKey() []byte { + if m != nil { + return m.Key + } + return 
nil +} + +func (m *MsgKeyValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *MsgKeyValue) GetSigner() string { + if m != nil { + return m.Signer + } + return "" +} + +type MsgCreateKeyValueResponse struct { +} + +func (m *MsgCreateKeyValueResponse) Reset() { *m = MsgCreateKeyValueResponse{} } +func (m *MsgCreateKeyValueResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateKeyValueResponse) ProtoMessage() {} +func (*MsgCreateKeyValueResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4dc296cbfe5ffcd5, []int{4} +} +func (m *MsgCreateKeyValueResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateKeyValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateKeyValueResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateKeyValueResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateKeyValueResponse.Merge(m, src) +} +func (m *MsgCreateKeyValueResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateKeyValueResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateKeyValueResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateKeyValueResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCounter)(nil), "testdata.MsgCounter") + proto.RegisterType((*MsgCounter2)(nil), "testdata.MsgCounter2") + proto.RegisterType((*MsgCreateCounterResponse)(nil), "testdata.MsgCreateCounterResponse") + proto.RegisterType((*MsgKeyValue)(nil), "testdata.MsgKeyValue") + proto.RegisterType((*MsgCreateKeyValueResponse)(nil), "testdata.MsgCreateKeyValueResponse") +} + +func init() { proto.RegisterFile("messages.proto", fileDescriptor_4dc296cbfe5ffcd5) } + +var fileDescriptor_4dc296cbfe5ffcd5 = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x92, 0xc1, 0x8a, 0x9b, 0x50, + 0x14, 0x86, 0x63, 0xa5, 0x89, 0x3d, 0x4d, 0xdb, 0x20, 0x69, 0x31, 0x16, 0x24, 0x58, 0x28, 0xd9, + 0x44, 0xc1, 0x3e, 0x41, 0xdb, 0x45, 0x5b, 0x5a, 0x1b, 0xb0, 0xd0, 0x61, 0x66, 0x13, 0xae, 0xe6, + 0xe4, 0x46, 0xa2, 0xf7, 0x8a, 0xf7, 0x3a, 0x90, 0xb7, 0x98, 0xc7, 0x9a, 0x65, 0x96, 0xb3, 0x1c, + 0x92, 0x17, 0x19, 0xd4, 0x98, 0x30, 0xc1, 0xc5, 0x2c, 0x66, 0xe5, 0x39, 0xff, 0x0f, 0xdf, 0xcf, + 0xf9, 0xbd, 0xf0, 0x36, 0x45, 0x21, 0x08, 0x45, 0xe1, 0x64, 0x39, 0x97, 0x5c, 0xd7, 0x24, 0x0a, + 0xb9, 0x20, 0x92, 0x98, 0x43, 0xca, 0x29, 0xaf, 0x44, 0xb7, 0x9c, 0x6a, 0xdf, 0x1c, 0x51, 0xce, + 0x69, 0x82, 0x6e, 0xb5, 0x85, 0xc5, 0xd2, 0x25, 0x6c, 0x53, 0x5b, 0xf6, 0x5f, 0x00, 0x5f, 0xd0, + 0xef, 0xbc, 0x60, 0x12, 0x73, 0xdd, 0x80, 0x5e, 0x54, 0x8f, 0x86, 0x32, 0x56, 0x26, 0x6a, 0xd0, + 0xac, 0xfa, 0x67, 0x78, 0xb7, 0x24, 0x71, 0x32, 0xe7, 0x6c, 0xbe, 0x22, 0x6c, 0x91, 0x60, 0x6e, + 0xbc, 0x18, 0x2b, 0x13, 0x2d, 0x78, 0x53, 0xca, 0x33, 0xf6, 0xb3, 0x16, 0xed, 0x19, 0xbc, 0x3e, + 0xf1, 0xbc, 0x67, 0x00, 0x9a, 0x60, 0x94, 0xc0, 0x1c, 0x89, 0xc4, 0x03, 0x36, 0x40, 0x91, 0x71, + 0x26, 0xd0, 0xf6, 0xab, 0xb0, 0xdf, 0xb8, 0xf9, 0x4f, 0x92, 0x02, 0xf5, 0x01, 0xa8, 0x6b, 0xdc, + 0x54, 0x41, 0xfd, 0xa0, 0x1c, 0xf5, 0x21, 0xbc, 0xbc, 0x2e, 0xad, 0x0a, 0xdd, 0x0f, 0xea, 0x45, + 0xff, 0x00, 0x5d, 0x11, 0x53, 0x86, 0xb9, 0xa1, 0x8e, 0x95, 0xc9, 0xab, 0xe0, 0xb0, 0xd9, 0x1f, + 0x61, 0x74, 0x8c, 0x6a, 0xa0, 0x4d, 0x96, 0x77, 0x01, 0xbd, 0xa6, 
0xa5, 0x3f, 0x30, 0xf8, 0xc5, + 0xa2, 0x1c, 0x53, 0x64, 0xb2, 0xd1, 0x86, 0x4e, 0xf3, 0x0f, 0x9c, 0xd3, 0xfd, 0xa6, 0xfd, 0x58, + 0x6d, 0x3b, 0xc2, 0xbb, 0x04, 0xed, 0x58, 0x97, 0xdf, 0x42, 0x7e, 0xdf, 0x46, 0xf6, 0x9e, 0x84, + 0xf6, 0x41, 0x3b, 0x96, 0xf3, 0x15, 0xd4, 0x7f, 0x28, 0xcf, 0x68, 0x8d, 0x6b, 0x7e, 0x6a, 0xa1, + 0x9d, 0x57, 0xf0, 0xed, 0xc7, 0xed, 0xce, 0x52, 0xb6, 0x3b, 0x4b, 0xb9, 0xdf, 0x59, 0xca, 0xcd, + 0xde, 0xea, 0x6c, 0xf7, 0x56, 0xe7, 0x6e, 0x6f, 0x75, 0xae, 0xa6, 0x34, 0x96, 0xab, 0x22, 0x74, + 0x22, 0x9e, 0xba, 0x11, 0x17, 0x29, 0x17, 0x87, 0xcf, 0x54, 0x2c, 0xd6, 0x6e, 0x48, 0x04, 0x92, + 0x2c, 0x73, 0xcb, 0x88, 0x42, 0xc6, 0x49, 0xd8, 0xad, 0xde, 0xde, 0x97, 0x87, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x63, 0x31, 0xab, 0xcc, 0xc8, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// CounterClient is the client API for Counter service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type CounterClient interface { + IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counterClient struct { + cc grpc1.ClientConn +} + +func NewCounterClient(cc grpc1.ClientConn) CounterClient { + return &counterClient{cc} +} + +func (c *counterClient) IncrementCounter(ctx context.Context, in *MsgCounter, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/testdata.Counter/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CounterServer is the server API for Counter service. +type CounterServer interface { + IncrementCounter(context.Context, *MsgCounter) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounterServer can be embedded to have forward compatible implementations. 
+type UnimplementedCounterServer struct { +} + +func (*UnimplementedCounterServer) IncrementCounter(ctx context.Context, req *MsgCounter) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounterServer(s grpc1.Server, srv CounterServer) { + s.RegisterService(&_Counter_serviceDesc, srv) +} + +func _Counter_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CounterServer).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.Counter/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CounterServer).IncrementCounter(ctx, req.(*MsgCounter)) + } + return interceptor(ctx, in, info, handler) +} + +var _Counter_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.Counter", + HandlerType: (*CounterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// Counter2Client is the client API for Counter2 service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type Counter2Client interface { + IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) +} + +type counter2Client struct { + cc grpc1.ClientConn +} + +func NewCounter2Client(cc grpc1.ClientConn) Counter2Client { + return &counter2Client{cc} +} + +func (c *counter2Client) IncrementCounter(ctx context.Context, in *MsgCounter2, opts ...grpc.CallOption) (*MsgCreateCounterResponse, error) { + out := new(MsgCreateCounterResponse) + err := c.cc.Invoke(ctx, "/testdata.Counter2/IncrementCounter", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Counter2Server is the server API for Counter2 service. +type Counter2Server interface { + IncrementCounter(context.Context, *MsgCounter2) (*MsgCreateCounterResponse, error) +} + +// UnimplementedCounter2Server can be embedded to have forward compatible implementations. 
+type UnimplementedCounter2Server struct { +} + +func (*UnimplementedCounter2Server) IncrementCounter(ctx context.Context, req *MsgCounter2) (*MsgCreateCounterResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IncrementCounter not implemented") +} + +func RegisterCounter2Server(s grpc1.Server, srv Counter2Server) { + s.RegisterService(&_Counter2_serviceDesc, srv) +} + +func _Counter2_IncrementCounter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCounter2) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(Counter2Server).IncrementCounter(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.Counter2/IncrementCounter", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(Counter2Server).IncrementCounter(ctx, req.(*MsgCounter2)) + } + return interceptor(ctx, in, info, handler) +} + +var _Counter2_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.Counter2", + HandlerType: (*Counter2Server)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "IncrementCounter", + Handler: _Counter2_IncrementCounter_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +// KeyValueClient is the client API for KeyValue service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type KeyValueClient interface { + Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) +} + +type keyValueClient struct { + cc grpc1.ClientConn +} + +func NewKeyValueClient(cc grpc1.ClientConn) KeyValueClient { + return &keyValueClient{cc} +} + +func (c *keyValueClient) Set(ctx context.Context, in *MsgKeyValue, opts ...grpc.CallOption) (*MsgCreateKeyValueResponse, error) { + out := new(MsgCreateKeyValueResponse) + err := c.cc.Invoke(ctx, "/testdata.KeyValue/Set", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// KeyValueServer is the server API for KeyValue service. +type KeyValueServer interface { + Set(context.Context, *MsgKeyValue) (*MsgCreateKeyValueResponse, error) +} + +// UnimplementedKeyValueServer can be embedded to have forward compatible implementations. 
+type UnimplementedKeyValueServer struct { +} + +func (*UnimplementedKeyValueServer) Set(ctx context.Context, req *MsgKeyValue) (*MsgCreateKeyValueResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Set not implemented") +} + +func RegisterKeyValueServer(s grpc1.Server, srv KeyValueServer) { + s.RegisterService(&_KeyValue_serviceDesc, srv) +} + +func _KeyValue_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgKeyValue) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(KeyValueServer).Set(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/testdata.KeyValue/Set", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(KeyValueServer).Set(ctx, req.(*MsgKeyValue)) + } + return interceptor(ctx, in, info, handler) +} + +var _KeyValue_serviceDesc = grpc.ServiceDesc{ + ServiceName: "testdata.KeyValue", + HandlerType: (*KeyValueServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Set", + Handler: _KeyValue_Set_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messages.proto", +} + +func (m *MsgCounter) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCounter2) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCounter2) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCounter2) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailOnHandler { + i-- + if m.FailOnHandler { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Counter != 0 { + i = encodeVarintMessages(dAtA, i, uint64(m.Counter)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateCounterResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateCounterResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateCounterResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgKeyValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgKeyValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() 
+ return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgKeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Signer) > 0 { + i -= len(m.Signer) + copy(dAtA[i:], m.Signer) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Signer))) + i-- + dAtA[i] = 0x1a + } + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintMessages(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateKeyValueResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateKeyValueResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateKeyValueResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintMessages(dAtA []byte, offset int, v uint64) int { + offset -= sovMessages(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCounter) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + return n +} + +func (m *MsgCounter2) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Counter != 0 { + n += 1 + sovMessages(uint64(m.Counter)) + } + if m.FailOnHandler { + n += 2 + } + return n +} + +func (m *MsgCreateCounterResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgKeyValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + l = len(m.Signer) + if l > 0 { + n += 1 + l + sovMessages(uint64(l)) + } + return n +} + +func (m *MsgCreateKeyValueResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovMessages(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMessages(x uint64) (n int) { + return sovMessages(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCounter) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCounter2) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCounter2: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCounter2: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Counter", wireType) + } + m.Counter = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Counter |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailOnHandler", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailOnHandler = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateCounterResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateCounterResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateCounterResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgKeyValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgKeyValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgKeyValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signer", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMessages + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthMessages + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Signer = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateKeyValueResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMessages + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateKeyValueResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipMessages(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMessages + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMessages(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMessages + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMessages + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMessages + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMessages + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthMessages = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMessages = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMessages = fmt.Errorf("proto: unexpected end of group") +) diff --git a/baseapp/testutil/messages.proto b/baseapp/testutil/messages.proto new file mode 100644 index 000000000000..866e33666983 --- /dev/null +++ b/baseapp/testutil/messages.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package testdata; + +import "gogoproto/gogo.proto"; +import "google/protobuf/any.proto"; + +option go_package = "github.com/cosmos/cosmos-sdk/baseapp/testutil"; + +message MsgCounter { + int64 counter = 1; + bool fail_on_handler = 2; +} + +message MsgCounter2 { + int64 counter = 1; + bool fail_on_handler = 2; +} + +message MsgCreateCounterResponse {} + +message MsgKeyValue { + bytes key = 1; + bytes value = 2; + string signer = 3; +} + +message MsgCreateKeyValueResponse {} + +service Counter { + rpc IncrementCounter(MsgCounter) returns (MsgCreateCounterResponse); +} + +service Counter2 { + rpc IncrementCounter(MsgCounter2) returns (MsgCreateCounterResponse); +} + +service KeyValue { + rpc Set(MsgKeyValue) returns (MsgCreateKeyValueResponse); +} \ No newline at end of file diff --git a/client/v2/internal/testpb/query.pulsar.go b/client/v2/internal/testpb/query.pulsar.go index d135ca28f008..16e9e8ffa8b5 100644 --- a/client/v2/internal/testpb/query.pulsar.go +++ b/client/v2/internal/testpb/query.pulsar.go @@ -2,11 +2,11 @@ package testpb import ( + v1beta11 "cosmossdk.io/api/cosmos/base/query/v1beta1" + v1beta1 "cosmossdk.io/api/cosmos/base/v1beta1" fmt "fmt" _ "github.com/cosmos/cosmos-proto" runtime "github.com/cosmos/cosmos-proto/runtime" - v1beta11 "cosmossdk.io/api/cosmos/base/query/v1beta1" - v1beta1 "cosmossdk.io/api/cosmos/base/v1beta1" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoiface "google.golang.org/protobuf/runtime/protoiface" protoimpl "google.golang.org/protobuf/runtime/protoimpl" diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 08ae3911a6d5..f4a689050214 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -35,6 +35,9 @@ cd .. # generate codec/testdata proto code (cd testutil/testdata; buf generate) +# generate baseapp test messages +(cd baseapp/testutil; buf generate) + # move proto files to the right places cp -r github.com/cosmos/cosmos-sdk/* ./ rm -rf github.com diff --git a/server/mock/app.go b/server/mock/app.go index 57dd93373de2..a1528be67f0a 100644 --- a/server/mock/app.go +++ b/server/mock/app.go @@ -1,6 +1,7 @@ package mock import ( + "context" "encoding/json" "errors" "fmt" @@ -8,12 +9,14 @@ import ( "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" + "google.golang.org/grpc" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" bam "github.com/cosmos/cosmos-sdk/baseapp" "github.com/cosmos/cosmos-sdk/codec" + codectypes "github.com/cosmos/cosmos-sdk/codec/types" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" ) @@ -38,8 +41,23 @@ func NewApp(rootDir string, logger log.Logger) (abci.Application, error) { baseApp.SetInitChainer(InitChainer(capKeyMainStore)) - // Set a Route. 
- baseApp.Router().AddRoute(sdk.NewRoute("kvstore", KVStoreHandler(capKeyMainStore))) + interfaceRegistry := codectypes.NewInterfaceRegistry() + interfaceRegistry.RegisterImplementations((*sdk.Msg)(nil), &kvstoreTx{}) + router := bam.NewMsgServiceRouter() + router.SetInterfaceRegistry(interfaceRegistry) + + newDesc := &grpc.ServiceDesc{ + ServiceName: "test", + Methods: []grpc.MethodDesc{ + { + MethodName: "Test", + Handler: _Msg_Test_Handler, + }, + }, + } + + router.RegisterService(newDesc, &MsgServerImpl{capKeyMainStore}) + baseApp.SetMsgServiceRouter(router) // Load latest version. if err := baseApp.LoadLatestVersion(); err != nil { @@ -53,7 +71,7 @@ func NewApp(rootDir string, logger log.Logger) (abci.Application, error) { // them to the db func KVStoreHandler(storeKey storetypes.StoreKey) sdk.Handler { return func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - dTx, ok := msg.(kvstoreTx) + dTx, ok := msg.(*kvstoreTx) if !ok { return nil, errors.New("KVStoreHandler should only receive kvstoreTx") } @@ -126,3 +144,34 @@ func AppGenStateEmpty(_ *codec.LegacyAmino, _ types.GenesisDoc, _ []json.RawMess appState = json.RawMessage(``) return } + +// Manually write the handlers for this custom message +type MsgServer interface { + Test(ctx context.Context, msg *kvstoreTx) (*sdk.Result, error) +} + +type MsgServerImpl struct { + capKeyMainStore *storetypes.KVStoreKey +} + +func _Msg_Test_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(kvstoreTx) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).Test(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/kvstoreTx", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).Test(ctx, req.(*kvstoreTx)) + } + return interceptor(ctx, in, info, handler) +} + +func (m MsgServerImpl) Test(ctx context.Context, msg *kvstoreTx) (*sdk.Result, error) { + return KVStoreHandler(m.capKeyMainStore)(sdk.UnwrapSDKContext(ctx), msg) +} diff --git a/server/mock/tx.go b/server/mock/tx.go index af0f52e566ac..40555e8724e8 100644 --- a/server/mock/tx.go +++ b/server/mock/tx.go @@ -16,50 +16,42 @@ type kvstoreTx struct { } // dummy implementation of proto.Message -func (msg kvstoreTx) Reset() {} -func (msg kvstoreTx) String() string { return "TODO" } -func (msg kvstoreTx) ProtoMessage() {} +func (msg *kvstoreTx) Reset() {} +func (msg *kvstoreTx) String() string { return "TODO" } +func (msg *kvstoreTx) ProtoMessage() {} var ( - _ sdk.Tx = kvstoreTx{} - _ sdk.Msg = kvstoreTx{} + _ sdk.Tx = &kvstoreTx{} + _ sdk.Msg = &kvstoreTx{} ) -func NewTx(key, value string) kvstoreTx { +func NewTx(key, value string) *kvstoreTx { bytes := fmt.Sprintf("%s=%s", key, value) - return kvstoreTx{ + return &kvstoreTx{ key: []byte(key), value: []byte(value), bytes: []byte(bytes), } } -func (tx kvstoreTx) Route() string { - return "kvstore" -} - -func (tx kvstoreTx) Type() string { +func (tx *kvstoreTx) Type() string { return "kvstore_tx" } -func (tx kvstoreTx) GetMsgs() []sdk.Msg { +func (tx *kvstoreTx) GetMsgs() []sdk.Msg { return []sdk.Msg{tx} } -func (tx kvstoreTx) GetMemo() string { - return "" -} - -func (tx kvstoreTx) GetSignBytes() []byte { +func (tx *kvstoreTx) GetSignBytes() []byte { return tx.bytes } // Should the app be calling this? Or only handlers? 
-func (tx kvstoreTx) ValidateBasic() error { +func (tx *kvstoreTx) ValidateBasic() error { return nil } -func (tx kvstoreTx) GetSigners() []sdk.AccAddress { +func (tx *kvstoreTx) GetSigners() []sdk.AccAddress { return nil } @@ -71,10 +63,10 @@ func decodeTx(txBytes []byte) (sdk.Tx, error) { split := bytes.Split(txBytes, []byte("=")) if len(split) == 1 { //nolint:gocritic k := split[0] - tx = kvstoreTx{k, k, txBytes} + tx = &kvstoreTx{k, k, txBytes} } else if len(split) == 2 { k, v := split[0], split[1] - tx = kvstoreTx{k, v, txBytes} + tx = &kvstoreTx{k, v, txBytes} } else { return nil, sdkerrors.Wrap(sdkerrors.ErrTxDecode, "too many '='") } diff --git a/store/cachekv/search_benchmark_test.go b/store/cachekv/search_benchmark_test.go index d7f1dcb8d4f1..921bff4e3864 100644 --- a/store/cachekv/search_benchmark_test.go +++ b/store/cachekv/search_benchmark_test.go @@ -1,9 +1,10 @@ package cachekv import ( - db "github.com/tendermint/tm-db" "strconv" "testing" + + db "github.com/tendermint/tm-db" ) func BenchmarkLargeUnsortedMisses(b *testing.B) { diff --git a/tests/integration/genutil/gentx_test.go b/tests/integration/genutil/gentx_test.go index 31c5711e75a4..f2b384c72586 100644 --- a/tests/integration/genutil/gentx_test.go +++ b/tests/integration/genutil/gentx_test.go @@ -59,7 +59,6 @@ type GenTxTestSuite struct { } func (suite *GenTxTestSuite) SetupTest() { - encCfg := moduletestutil.TestEncodingConfig{} app, err := simtestutil.SetupWithConfiguration( diff --git a/tests/mocks/types_router.go b/tests/mocks/types_router.go index 27f8fe56b8f1..5d0bd4caa60f 100644 --- a/tests/mocks/types_router.go +++ b/tests/mocks/types_router.go @@ -11,57 +11,6 @@ import ( gomock "github.com/golang/mock/gomock" ) -// MockRouter is a mock of Router interface. -type MockRouter struct { - ctrl *gomock.Controller - recorder *MockRouterMockRecorder -} - -// MockRouterMockRecorder is the mock recorder for MockRouter. -type MockRouterMockRecorder struct { - mock *MockRouter -} - -// NewMockRouter creates a new mock instance. -func NewMockRouter(ctrl *gomock.Controller) *MockRouter { - mock := &MockRouter{ctrl: ctrl} - mock.recorder = &MockRouterMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockRouter) EXPECT() *MockRouterMockRecorder { - return m.recorder -} - -// AddRoute mocks base method. -func (m *MockRouter) AddRoute(r types.Route) types.Router { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "AddRoute", r) - ret0, _ := ret[0].(types.Router) - return ret0 -} - -// AddRoute indicates an expected call of AddRoute. -func (mr *MockRouterMockRecorder) AddRoute(r interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddRoute", reflect.TypeOf((*MockRouter)(nil).AddRoute), r) -} - -// Route mocks base method. -func (m *MockRouter) Route(ctx types.Context, path string) types.Handler { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Route", ctx, path) - ret0, _ := ret[0].(types.Handler) - return ret0 -} - -// Route indicates an expected call of Route. -func (mr *MockRouterMockRecorder) Route(ctx, path interface{}) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Route", reflect.TypeOf((*MockRouter)(nil).Route), ctx, path) -} - // MockQueryRouter is a mock of QueryRouter interface. 
type MockQueryRouter struct { ctrl *gomock.Controller diff --git a/types/module/module.go b/types/module/module.go index 08910d33ca40..e4be03c157ad 100644 --- a/types/module/module.go +++ b/types/module/module.go @@ -189,9 +189,6 @@ func NewGenesisOnlyAppModule(amg AppModuleGenesis) AppModule { // RegisterInvariants is a placeholder function register no invariants func (GenesisOnlyAppModule) RegisterInvariants(_ sdk.InvariantRegistry) {} -// Route empty module message route -func (GenesisOnlyAppModule) Route() sdk.Route { return sdk.Route{} } - // QuerierRoute returns an empty module querier route func (GenesisOnlyAppModule) QuerierRoute() string { return "" } diff --git a/types/router.go b/types/router.go index 9dab11f606c0..b8a1da2ae742 100644 --- a/types/router.go +++ b/types/router.go @@ -2,7 +2,6 @@ package types import ( "regexp" - "strings" ) var ( @@ -27,37 +26,6 @@ var ( IsNumeric = regexp.MustCompile(`^[0-9]+$`).MatchString ) -// Router provides handlers for each transaction type. -type Router interface { - AddRoute(r Route) Router - Route(ctx Context, path string) Handler -} - -type Route struct { - path string - handler Handler -} - -// NewRoute returns an instance of Route. -func NewRoute(p string, h Handler) Route { - return Route{path: strings.TrimSpace(p), handler: h} -} - -// Path returns the path the route has assigned. -func (r Route) Path() string { - return r.path -} - -// Handler returns the handler that handles the route. -func (r Route) Handler() Handler { - return r.handler -} - -// Empty returns true only if both handler and path are not empty. -func (r Route) Empty() bool { - return r.handler == nil || r.path == "" -} - // QueryRouter provides queryables for each query path. type QueryRouter interface { AddRoute(r string, h Querier) QueryRouter diff --git a/types/router_test.go b/types/router_test.go deleted file mode 100644 index 7387e9b94506..000000000000 --- a/types/router_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package types_test - -import ( - "testing" - - "github.com/stretchr/testify/suite" - - sdk "github.com/cosmos/cosmos-sdk/types" -) - -type routeTestSuite struct { - suite.Suite -} - -func TestRouteTestSuite(t *testing.T) { - suite.Run(t, new(routeTestSuite)) -} - -func (s *routeTestSuite) SetupSuite() { - s.T().Parallel() -} - -func (s *routeTestSuite) TestNilRoute() { - tests := []struct { - name string - route sdk.Route - expected bool - }{ - { - name: "all empty", - route: sdk.NewRoute("", nil), - expected: true, - }, - { - name: "only path", - route: sdk.NewRoute("some", nil), - expected: true, - }, - { - name: "only handler", - route: sdk.NewRoute("", func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return nil, nil - }), - expected: true, - }, - { - name: "correct route", - route: sdk.NewRoute("some", func(ctx sdk.Context, msg sdk.Msg) (*sdk.Result, error) { - return nil, nil - }), - expected: false, - }, - } - - for _, tt := range tests { - s.Require().Equal(tt.expected, tt.route.Empty()) - } -} diff --git a/x/bank/bench_test.go b/x/bank/bench_test.go index 6a91602d9bcf..fa936df15c58 100644 --- a/x/bank/bench_test.go +++ b/x/bank/bench_test.go @@ -30,7 +30,8 @@ func genSequenceOfTxs(txGen client.TxConfig, accNums []uint64, initSeqNums []uint64, numToGenerate int, - priv ...cryptotypes.PrivKey) ([]sdk.Tx, error) { + priv ...cryptotypes.PrivKey, +) ([]sdk.Tx, error) { txs := make([]sdk.Tx, numToGenerate) var err error for i := 0; i < numToGenerate; i++ { @@ -58,7 +59,7 @@ func genSequenceOfTxs(txGen client.TxConfig, } func 
BenchmarkOneBankSendTxPerBlock(b *testing.B) { - //b.Skip("Skipping benchmark with buggy code reported at https://github.com/cosmos/cosmos-sdk/issues/10023") + // b.Skip("Skipping benchmark with buggy code reported at https://github.com/cosmos/cosmos-sdk/issues/10023") b.ReportAllocs() // Add an account at genesis @@ -103,7 +104,7 @@ func BenchmarkOneBankSendTxPerBlock(b *testing.B) { } func BenchmarkOneBankMultiSendTxPerBlock(b *testing.B) { - //b.Skip("Skipping benchmark with buggy code reported at https://github.com/cosmos/cosmos-sdk/issues/10023") + // b.Skip("Skipping benchmark with buggy code reported at https://github.com/cosmos/cosmos-sdk/issues/10023") b.ReportAllocs() // Add an account at genesis diff --git a/x/bank/simulation/operations_test.go b/x/bank/simulation/operations_test.go index 727c4e3c38f6..6f4b5057cdd9 100644 --- a/x/bank/simulation/operations_test.go +++ b/x/bank/simulation/operations_test.go @@ -36,7 +36,6 @@ type SimTestSuite struct { } func (suite *SimTestSuite) SetupTest() { - var ( appBuilder *runtime.AppBuilder err error diff --git a/x/distribution/keeper/delegation_test.go b/x/distribution/keeper/delegation_test.go index caba2afb5ff3..af8eeb0d3c13 100644 --- a/x/distribution/keeper/delegation_test.go +++ b/x/distribution/keeper/delegation_test.go @@ -467,7 +467,6 @@ func TestWithdrawDelegationRewardsBasic(t *testing.T) { bankKeeper.EXPECT().SendCoinsFromModuleToAccount(ctx, disttypes.ModuleName, addr, expCommission) _, err = distrKeeper.WithdrawValidatorCommission(ctx, valAddr) require.Nil(t, err) - } func TestCalculateRewardsAfterManySlashesInSameBlock(t *testing.T) { diff --git a/x/genutil/gentx_test.go b/x/genutil/gentx_test.go index 89047d034fc3..b933bb45dd20 100644 --- a/x/genutil/gentx_test.go +++ b/x/genutil/gentx_test.go @@ -1,9 +1,14 @@ package genutil_test import ( - "cosmossdk.io/math" "encoding/json" "fmt" + "math/rand" + "testing" + "time" + + "cosmossdk.io/math" + "github.com/cosmos/cosmos-sdk/crypto/keys/secp256k1" "github.com/cosmos/cosmos-sdk/testutil" simtestutil "github.com/cosmos/cosmos-sdk/testutil/sims" @@ -19,9 +24,6 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/suite" abci "github.com/tendermint/tendermint/abci/types" - "math/rand" - "testing" - "time" ) var ( @@ -90,7 +92,6 @@ func (suite *GenTxTestSuite) setAccountBalance(balances []banktypes.Balance) jso } for _, balance := range balances { bankGenesisState.Balances = append(bankGenesisState.Balances, balance) - } for _, balance := range bankGenesisState.Balances { bankGenesisState.Supply.Add(balance.Coins...) @@ -261,7 +262,6 @@ func (suite *GenTxTestSuite) TestDeliverGenTxs() { genTxs[0] = tx }, func(_ abci.RequestDeliverTx) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{ Code: sdkerrors.ErrNoSignatures.ABCICode(), GasWanted: int64(10000000), diff --git a/x/gov/migrations/v3/store_test.go b/x/gov/migrations/v3/store_test.go index eaac29d72ec6..ce5448cbca01 100644 --- a/x/gov/migrations/v3/store_test.go +++ b/x/gov/migrations/v3/store_test.go @@ -10,7 +10,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" moduletestutil "github.com/cosmos/cosmos-sdk/types/module/testutil" "github.com/cosmos/cosmos-sdk/x/gov" - v1gov "github.com/cosmos/cosmos-sdk/x/gov/migrations/v1" + v1gov "github.com/cosmos/cosmos-sdk/x/gov/migrations/v1" v3gov "github.com/cosmos/cosmos-sdk/x/gov/migrations/v3" v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" "github.com/cosmos/cosmos-sdk/x/gov/types/v1beta1"