diff --git a/.gitignore b/.gitignore index e5f8e1cb87..d100931091 100644 --- a/.gitignore +++ b/.gitignore @@ -6,3 +6,4 @@ docs/tla/states/ *.out vendor/ .vscode +x/ccv/provider/keyguard/__debug_bin diff --git a/Dockerfile b/Dockerfile index 7b699ac974..cc537c7ccc 100644 --- a/Dockerfile +++ b/Dockerfile @@ -39,6 +39,7 @@ COPY --from=hermes-builder /usr/bin/hermes /usr/local/bin/ COPY --from=is-builder /go/bin/interchain-security-pd /usr/local/bin/interchain-security-pd COPY --from=is-builder /go/bin/interchain-security-cd /usr/local/bin/interchain-security-cd +COPY --from=is-builder /go/bin/interchain-security-cdd /usr/local/bin/interchain-security-cdd # Copy in the shell scripts that run the testnet diff --git a/Makefile b/Makefile index 1b88e19292..3e992796b6 100644 --- a/Makefile +++ b/Makefile @@ -6,6 +6,7 @@ install: go.sum export CGO_LDFLAGS="-Wl,-z,relro,-z,now -fstack-protector" go install $(BUILD_FLAGS) ./cmd/interchain-security-pd go install $(BUILD_FLAGS) ./cmd/interchain-security-cd + go install $(BUILD_FLAGS) ./cmd/interchain-security-cdd # run all tests: unit, e2e, diff, and integration test: @@ -41,8 +42,8 @@ $(BUILDDIR)/: ### Protobuf ### ############################################################################### -containerProtoVer=v0.7 -containerProtoImage=tendermintdev/sdk-proto-gen:$(containerProtoVer) +containerProtoVer=0.9.0 +containerProtoImage=ghcr.io/cosmos/proto-builder:$(containerProtoVer) containerProtoGen=cosmos-sdk-proto-gen-$(containerProtoVer) containerProtoGenSwagger=cosmos-sdk-proto-gen-swagger-$(containerProtoVer) containerProtoFmt=cosmos-sdk-proto-fmt-$(containerProtoVer) diff --git a/app/consumer-democracy/ante_handler.go b/app/consumer-democracy/ante_handler.go new file mode 100644 index 0000000000..5e282ae06a --- /dev/null +++ b/app/consumer-democracy/ante_handler.go @@ -0,0 +1,59 @@ +package app + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/auth/ante" + ibcante "github.com/cosmos/ibc-go/v3/modules/core/ante" + ibckeeper "github.com/cosmos/ibc-go/v3/modules/core/keeper" + consumerante "github.com/cosmos/interchain-security/app/consumer/ante" + ibcconsumerkeeper "github.com/cosmos/interchain-security/x/ccv/consumer/keeper" +) + +// HandlerOptions extend the SDK's AnteHandler options by requiring the IBC +// channel keeper. 
+type HandlerOptions struct { + ante.HandlerOptions + + IBCKeeper *ibckeeper.Keeper + ConsumerKeeper ibcconsumerkeeper.Keeper +} + +func NewAnteHandler(options HandlerOptions) (sdk.AnteHandler, error) { + if options.AccountKeeper == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrLogic, "account keeper is required for AnteHandler") + } + if options.BankKeeper == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrLogic, "bank keeper is required for AnteHandler") + } + if options.SignModeHandler == nil { + return nil, sdkerrors.Wrap(sdkerrors.ErrLogic, "sign mode handler is required for ante builder") + } + + var sigGasConsumer = options.SigGasConsumer + if sigGasConsumer == nil { + sigGasConsumer = ante.DefaultSigVerificationGasConsumer + } + + anteDecorators := []sdk.AnteDecorator{ + ante.NewSetUpContextDecorator(), + ante.NewRejectExtensionOptionsDecorator(), + consumerante.NewMsgFilterDecorator(options.ConsumerKeeper), + consumerante.NewDisabledModulesDecorator("/cosmos.evidence", "/cosmos.slashing"), + ante.NewMempoolFeeDecorator(), + ante.NewValidateBasicDecorator(), + ante.NewTxTimeoutHeightDecorator(), + ante.NewValidateMemoDecorator(options.AccountKeeper), + ante.NewConsumeGasForTxSizeDecorator(options.AccountKeeper), + ante.NewDeductFeeDecorator(options.AccountKeeper, options.BankKeeper, options.FeegrantKeeper), + // SetPubKeyDecorator must be called before all signature verification decorators + ante.NewSetPubKeyDecorator(options.AccountKeeper), + ante.NewValidateSigCountDecorator(options.AccountKeeper), + ante.NewSigGasConsumeDecorator(options.AccountKeeper, sigGasConsumer), + ante.NewSigVerificationDecorator(options.AccountKeeper, options.SignModeHandler), + ante.NewIncrementSequenceDecorator(options.AccountKeeper), + ibcante.NewAnteDecorator(options.IBCKeeper), + } + + return sdk.ChainAnteDecorators(anteDecorators...), nil +} diff --git a/app/consumer-democracy/app.go b/app/consumer-democracy/app.go new file mode 100644 index 0000000000..1492244ce0 --- /dev/null +++ b/app/consumer-democracy/app.go @@ -0,0 +1,855 @@ +package app + +import ( + "fmt" + "io" + stdlog "log" + "net/http" + "os" + "path/filepath" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" + "github.com/cosmos/cosmos-sdk/client/rpc" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/server/api" + "github.com/cosmos/cosmos-sdk/server/config" + servertypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/simapp" + store "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/module" + "github.com/cosmos/cosmos-sdk/version" + "github.com/cosmos/cosmos-sdk/x/auth" + "github.com/cosmos/cosmos-sdk/x/auth/ante" + authrest "github.com/cosmos/cosmos-sdk/x/auth/client/rest" + authkeeper "github.com/cosmos/cosmos-sdk/x/auth/keeper" + authsims "github.com/cosmos/cosmos-sdk/x/auth/simulation" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/auth/vesting" + vestingtypes "github.com/cosmos/cosmos-sdk/x/auth/vesting/types" + "github.com/cosmos/cosmos-sdk/x/authz" + authzkeeper "github.com/cosmos/cosmos-sdk/x/authz/keeper" + authzmodule "github.com/cosmos/cosmos-sdk/x/authz/module" + "github.com/cosmos/cosmos-sdk/x/bank" + bankkeeper "github.com/cosmos/cosmos-sdk/x/bank/keeper" + banktypes 
"github.com/cosmos/cosmos-sdk/x/bank/types" + "github.com/cosmos/cosmos-sdk/x/capability" + capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + "github.com/cosmos/cosmos-sdk/x/crisis" + crisiskeeper "github.com/cosmos/cosmos-sdk/x/crisis/keeper" + crisistypes "github.com/cosmos/cosmos-sdk/x/crisis/types" + "github.com/cosmos/cosmos-sdk/x/evidence" + evidencekeeper "github.com/cosmos/cosmos-sdk/x/evidence/keeper" + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + "github.com/cosmos/cosmos-sdk/x/feegrant" + feegrantkeeper "github.com/cosmos/cosmos-sdk/x/feegrant/keeper" + feegrantmodule "github.com/cosmos/cosmos-sdk/x/feegrant/module" + "github.com/cosmos/cosmos-sdk/x/params" + paramsclient "github.com/cosmos/cosmos-sdk/x/params/client" + paramskeeper "github.com/cosmos/cosmos-sdk/x/params/keeper" + paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" + upgradeclient "github.com/cosmos/cosmos-sdk/x/upgrade/client" + + "github.com/cosmos/cosmos-sdk/x/slashing" + slashingkeeper "github.com/cosmos/cosmos-sdk/x/slashing/keeper" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + "github.com/cosmos/cosmos-sdk/x/upgrade" + upgradekeeper "github.com/cosmos/cosmos-sdk/x/upgrade/keeper" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + "github.com/cosmos/ibc-go/v3/modules/apps/transfer" + ibctransferkeeper "github.com/cosmos/ibc-go/v3/modules/apps/transfer/keeper" + ibctransfertypes "github.com/cosmos/ibc-go/v3/modules/apps/transfer/types" + ibc "github.com/cosmos/ibc-go/v3/modules/core" + ibcconnectiontypes "github.com/cosmos/ibc-go/v3/modules/core/03-connection/types" + porttypes "github.com/cosmos/ibc-go/v3/modules/core/05-port/types" + ibchost "github.com/cosmos/ibc-go/v3/modules/core/24-host" + ibckeeper "github.com/cosmos/ibc-go/v3/modules/core/keeper" + ibctesting "github.com/cosmos/ibc-go/v3/testing" + "github.com/gorilla/mux" + "github.com/rakyll/statik/fs" + "github.com/spf13/cast" + "github.com/tendermint/spm/cosmoscmd" + abci "github.com/tendermint/tendermint/abci/types" + tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/libs/log" + tmos "github.com/tendermint/tendermint/libs/os" + dbm "github.com/tendermint/tm-db" + + distr "github.com/cosmos/cosmos-sdk/x/distribution" + ccvdistrclient "github.com/cosmos/cosmos-sdk/x/distribution/client" + ccvdistrkeeper "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + ccvdistrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + ccvdistr "github.com/cosmos/interchain-security/x/ccv/democracy/distribution" + + ccvstakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + ccvstakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + ccvstaking "github.com/cosmos/interchain-security/x/ccv/democracy/staking" + + ccvgov "github.com/cosmos/cosmos-sdk/x/gov" + ccvgovkeeper "github.com/cosmos/cosmos-sdk/x/gov/keeper" + ccvgovtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + // add mint + ccvmint "github.com/cosmos/cosmos-sdk/x/mint" + ccvmintkeeper "github.com/cosmos/cosmos-sdk/x/mint/keeper" + ccvminttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + paramproposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + ibcconsumer "github.com/cosmos/interchain-security/x/ccv/consumer" + ibcconsumerkeeper "github.com/cosmos/interchain-security/x/ccv/consumer/keeper" + ibcconsumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + + // unnamed 
import of statik for swagger UI support + _ "github.com/cosmos/cosmos-sdk/client/docs/statik" + ibcclienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" +) + +const ( + AppName = "interchain-security-cd" + upgradeName = "v07-Theta" + AccountAddressPrefix = "cosmos" +) + +var ( + // DefaultNodeHome default home directories for the application daemon + DefaultNodeHome string + + // ModuleBasics defines the module BasicManager is in charge of setting up basic, + // non-dependant module elements, such as codec registration + // and genesis verification. + ModuleBasics = module.NewBasicManager( + auth.AppModuleBasic{}, + bank.AppModuleBasic{}, + capability.AppModuleBasic{}, + ccvstaking.AppModuleBasic{}, + ccvmint.AppModuleBasic{}, + ccvdistr.AppModuleBasic{}, + ccvgov.NewAppModuleBasic( + // TODO: eventually remove upgrade proposal handler and cancel proposal handler + paramsclient.ProposalHandler, ccvdistrclient.ProposalHandler, upgradeclient.ProposalHandler, upgradeclient.CancelProposalHandler, + ), + params.AppModuleBasic{}, + crisis.AppModuleBasic{}, + slashing.AppModuleBasic{}, + feegrantmodule.AppModuleBasic{}, + authzmodule.AppModuleBasic{}, + ibc.AppModuleBasic{}, + upgrade.AppModuleBasic{}, + evidence.AppModuleBasic{}, + transfer.AppModuleBasic{}, + vesting.AppModuleBasic{}, + //router.AppModuleBasic{}, + ibcconsumer.AppModuleBasic{}, + ) + + // module account permissions + maccPerms = map[string][]string{ + authtypes.FeeCollectorName: nil, + ccvstakingtypes.BondedPoolName: {authtypes.Burner, authtypes.Staking}, + ccvstakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking}, + ccvdistrtypes.ModuleName: nil, + ccvminttypes.ModuleName: {authtypes.Minter}, + ibcconsumertypes.ConsumerRedistributeName: nil, + ibcconsumertypes.ConsumerToSendToProviderName: nil, + ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + ccvgovtypes.ModuleName: {authtypes.Burner}, + } +) + +var ( + _ simapp.App = (*App)(nil) + _ servertypes.Application = (*App)(nil) + _ cosmoscmd.CosmosApp = (*App)(nil) + _ ibctesting.TestingApp = (*App)(nil) +) + +// App extends an ABCI application, but with most of its parameters exported. +// They are exported for convenience in creating helper functions, as object +// capabilities aren't needed for testing. 
+type App struct { // nolint: golint + *baseapp.BaseApp + legacyAmino *codec.LegacyAmino + appCodec codec.Codec + interfaceRegistry types.InterfaceRegistry + + invCheckPeriod uint + + // keys to access the substores + keys map[string]*sdk.KVStoreKey + tkeys map[string]*sdk.TransientStoreKey + memKeys map[string]*sdk.MemoryStoreKey + + // keepers + AccountKeeper authkeeper.AccountKeeper + BankKeeper bankkeeper.Keeper + CapabilityKeeper *capabilitykeeper.Keeper + StakingKeeper ccvstakingkeeper.Keeper + SlashingKeeper slashingkeeper.Keeper + MintKeeper ccvmintkeeper.Keeper + DistrKeeper ccvdistrkeeper.Keeper + GovKeeper ccvgovkeeper.Keeper + CrisisKeeper crisiskeeper.Keeper + UpgradeKeeper upgradekeeper.Keeper + ParamsKeeper paramskeeper.Keeper + IBCKeeper *ibckeeper.Keeper // IBC Keeper must be a pointer in the app, so we can SetRouter on it correctly + EvidenceKeeper evidencekeeper.Keeper + TransferKeeper ibctransferkeeper.Keeper + FeeGrantKeeper feegrantkeeper.Keeper + AuthzKeeper authzkeeper.Keeper + ConsumerKeeper ibcconsumerkeeper.Keeper + + // make scoped keepers public for test purposes + ScopedIBCKeeper capabilitykeeper.ScopedKeeper + ScopedTransferKeeper capabilitykeeper.ScopedKeeper + ScopedIBCConsumerKeeper capabilitykeeper.ScopedKeeper + + // the module manager + MM *module.Manager + + // simulation manager + sm *module.SimulationManager + configurator module.Configurator +} + +func init() { + userHomeDir, err := os.UserHomeDir() + if err != nil { + stdlog.Println("Failed to get home dir %2", err) + } + + DefaultNodeHome = filepath.Join(userHomeDir, "."+AppName) +} + +// New returns a reference to an initialized App. +func New( + logger log.Logger, + db dbm.DB, + traceStore io.Writer, + loadLatest bool, + skipUpgradeHeights map[int64]bool, + homePath string, + invCheckPeriod uint, + encodingConfig cosmoscmd.EncodingConfig, + appOpts servertypes.AppOptions, + baseAppOptions ...func(*baseapp.BaseApp), +) cosmoscmd.App { + + appCodec := encodingConfig.Marshaler + legacyAmino := encodingConfig.Amino + interfaceRegistry := encodingConfig.InterfaceRegistry + + bApp := baseapp.NewBaseApp(AppName, logger, db, encodingConfig.TxConfig.TxDecoder(), baseAppOptions...) 
+ bApp.SetCommitMultiStoreTracer(traceStore) + bApp.SetVersion(version.Version) + bApp.SetInterfaceRegistry(interfaceRegistry) + + keys := sdk.NewKVStoreKeys( + authtypes.StoreKey, banktypes.StoreKey, ccvstakingtypes.StoreKey, + ccvminttypes.StoreKey, ccvdistrtypes.StoreKey, slashingtypes.StoreKey, + ccvgovtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, + capabilitytypes.StoreKey, authzkeeper.StoreKey, + ibcconsumertypes.StoreKey, + ) + tkeys := sdk.NewTransientStoreKeys(paramstypes.TStoreKey) + memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey) + + app := &App{ + BaseApp: bApp, + legacyAmino: legacyAmino, + appCodec: appCodec, + interfaceRegistry: interfaceRegistry, + invCheckPeriod: invCheckPeriod, + keys: keys, + tkeys: tkeys, + memKeys: memKeys, + } + + app.ParamsKeeper = initParamsKeeper( + appCodec, + legacyAmino, + keys[paramstypes.StoreKey], + tkeys[paramstypes.TStoreKey], + ) + + // set the BaseApp's parameter store + bApp.SetParamStore( + app.ParamsKeeper.Subspace(baseapp.Paramspace).WithKeyTable( + paramskeeper.ConsensusParamsKeyTable()), + ) + + // add capability keeper and ScopeToModule for ibc module + app.CapabilityKeeper = capabilitykeeper.NewKeeper( + appCodec, + keys[capabilitytypes.StoreKey], + memKeys[capabilitytypes.MemStoreKey], + ) + scopedIBCKeeper := app.CapabilityKeeper.ScopeToModule(ibchost.ModuleName) + scopedTransferKeeper := app.CapabilityKeeper.ScopeToModule(ibctransfertypes.ModuleName) + scopedIBCConsumerKeeper := app.CapabilityKeeper.ScopeToModule(ibcconsumertypes.ModuleName) + app.CapabilityKeeper.Seal() + + // add keepers + app.AccountKeeper = authkeeper.NewAccountKeeper( + appCodec, + keys[authtypes.StoreKey], + app.GetSubspace(authtypes.ModuleName), + authtypes.ProtoBaseAccount, + maccPerms, + ) + + // Remove the fee-pool from the group of blocked recipient addresses in bank + // this is required for the provider chain to be able to receive tokens from + // the consumer chain + bankBlockedAddrs := app.ModuleAccountAddrs() + delete(bankBlockedAddrs, authtypes.NewModuleAddress( + authtypes.FeeCollectorName).String()) + + app.BankKeeper = bankkeeper.NewBaseKeeper( + appCodec, + keys[banktypes.StoreKey], + app.AccountKeeper, + app.GetSubspace(banktypes.ModuleName), + bankBlockedAddrs, + ) + app.AuthzKeeper = authzkeeper.NewKeeper( + keys[authzkeeper.StoreKey], + appCodec, + app.BaseApp.MsgServiceRouter(), + ) + app.FeeGrantKeeper = feegrantkeeper.NewKeeper( + appCodec, + keys[feegrant.StoreKey], + app.AccountKeeper, + ) + + ccvstakingKeeper := ccvstakingkeeper.NewKeeper( + appCodec, + keys[ccvstakingtypes.StoreKey], + app.AccountKeeper, + app.BankKeeper, + app.GetSubspace(ccvstakingtypes.ModuleName), + ) + + app.MintKeeper = ccvmintkeeper.NewKeeper( + appCodec, keys[ccvminttypes.StoreKey], app.GetSubspace(ccvminttypes.ModuleName), &ccvstakingKeeper, + app.AccountKeeper, app.BankKeeper, authtypes.FeeCollectorName, + ) + + app.SlashingKeeper = slashingkeeper.NewKeeper( + appCodec, + keys[slashingtypes.StoreKey], + &app.ConsumerKeeper, + app.GetSubspace(slashingtypes.ModuleName), + ) + app.DistrKeeper = ccvdistrkeeper.NewKeeper( + appCodec, + keys[ccvdistrtypes.StoreKey], + app.GetSubspace(ccvdistrtypes.ModuleName), + app.AccountKeeper, + app.BankKeeper, + &ccvstakingKeeper, + ibcconsumertypes.ConsumerRedistributeName, + app.ModuleAccountAddrs(), + ) + app.CrisisKeeper = crisiskeeper.NewKeeper( + app.GetSubspace(crisistypes.ModuleName), + 
invCheckPeriod, + app.BankKeeper, + authtypes.FeeCollectorName, + ) + app.UpgradeKeeper = upgradekeeper.NewKeeper( + skipUpgradeHeights, + keys[upgradetypes.StoreKey], + appCodec, + homePath, + app.BaseApp, + ) + + // register the staking hooks + // NOTE: stakingKeeper above is passed by reference, so that it will contain these hooks + // NOTE: slashing hook was removed since it's only relevant for consumerKeeper + app.StakingKeeper = *ccvstakingKeeper.SetHooks( + ccvstakingtypes.NewMultiStakingHooks(app.DistrKeeper.Hooks()), + ) + + // register the proposal types + ccvgovRouter := ccvgovtypes.NewRouter() + ccvgovRouter.AddRoute(ccvgovtypes.RouterKey, ccvgovtypes.ProposalHandler). + AddRoute(paramproposal.RouterKey, params.NewParamChangeProposalHandler(app.ParamsKeeper)). + AddRoute(ccvdistrtypes.RouterKey, distr.NewCommunityPoolSpendProposalHandler(app.DistrKeeper)). + // TODO: remove upgrade handler from gov once admin module or decision for only signaling proposal is made. + AddRoute(upgradetypes.RouterKey, upgrade.NewSoftwareUpgradeProposalHandler(app.UpgradeKeeper)) + govKeeper := ccvgovkeeper.NewKeeper( + appCodec, keys[ccvgovtypes.StoreKey], app.GetSubspace(ccvgovtypes.ModuleName), app.AccountKeeper, app.BankKeeper, + &ccvstakingKeeper, ccvgovRouter, + ) + + app.GovKeeper = *govKeeper.SetHooks( + ccvgovtypes.NewMultiGovHooks( + // register the governance hooks + ), + ) + + app.IBCKeeper = ibckeeper.NewKeeper( + appCodec, + keys[ibchost.StoreKey], + app.GetSubspace(ibchost.ModuleName), + &app.ConsumerKeeper, + app.UpgradeKeeper, + scopedIBCKeeper, + ) + + // Create CCV consumer and modules + app.ConsumerKeeper = ibcconsumerkeeper.NewKeeper( + appCodec, + keys[ibcconsumertypes.StoreKey], + app.GetSubspace(ibcconsumertypes.ModuleName), + scopedIBCConsumerKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.IBCKeeper.ConnectionKeeper, + app.IBCKeeper.ClientKeeper, + app.SlashingKeeper, + app.BankKeeper, + app.AccountKeeper, + &app.TransferKeeper, + app.IBCKeeper, + authtypes.FeeCollectorName, + ) + + // consumer keeper satisfies the staking keeper interface + // of the slashing module + app.SlashingKeeper = slashingkeeper.NewKeeper( + appCodec, + keys[slashingtypes.StoreKey], + &app.ConsumerKeeper, + app.GetSubspace(slashingtypes.ModuleName), + ) + + // register slashing module StakingHooks to the consumer keeper + app.ConsumerKeeper = *app.ConsumerKeeper.SetHooks(app.SlashingKeeper.Hooks()) + consumerModule := ibcconsumer.NewAppModule(app.ConsumerKeeper) + + app.TransferKeeper = ibctransferkeeper.NewKeeper( + appCodec, + keys[ibctransfertypes.StoreKey], + app.GetSubspace(ibctransfertypes.ModuleName), + app.IBCKeeper.ChannelKeeper, + app.IBCKeeper.ChannelKeeper, + &app.IBCKeeper.PortKeeper, + app.AccountKeeper, + app.BankKeeper, + scopedTransferKeeper, + ) + transferModule := transfer.NewAppModule(app.TransferKeeper) + ibcmodule := transfer.NewIBCModule(app.TransferKeeper) + + // create static IBC router, add transfer route, then set and seal it + ibcRouter := porttypes.NewRouter() + ibcRouter.AddRoute(ibctransfertypes.ModuleName, ibcmodule) + ibcRouter.AddRoute(ibcconsumertypes.ModuleName, consumerModule) + app.IBCKeeper.SetRouter(ibcRouter) + + // create evidence keeper with router + evidenceKeeper := evidencekeeper.NewKeeper( + appCodec, + keys[evidencetypes.StoreKey], + &app.ConsumerKeeper, + app.SlashingKeeper, + ) + + app.EvidenceKeeper = *evidenceKeeper + + skipGenesisInvariants := cast.ToBool(appOpts.Get(crisis.FlagSkipGenesisInvariants)) + + // NOTE: Any 
module instantiated in the module manager that is later modified + // must be passed by reference here. + app.MM = module.NewManager( + auth.NewAppModule(appCodec, app.AccountKeeper, nil), + vesting.NewAppModule(app.AccountKeeper, app.BankKeeper), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper), + capability.NewAppModule(appCodec, *app.CapabilityKeeper), + crisis.NewAppModule(&app.CrisisKeeper, skipGenesisInvariants), + feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), + ccvgov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper), + ccvmint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.ConsumerKeeper), + ccvdistr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName), + ccvstaking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), + upgrade.NewAppModule(app.UpgradeKeeper), + evidence.NewAppModule(app.EvidenceKeeper), + params.NewAppModule(app.ParamsKeeper), + authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), + ibc.NewAppModule(app.IBCKeeper), + transferModule, + consumerModule, + ) + + // During begin block slashing happens after distr.BeginBlocker so that + // there is nothing left over in the validator fee pool, so as to keep the + // CanWithdrawInvariant invariant. + // NOTE: staking module is required if HistoricalEntries param > 0 + // NOTE: capability module's beginblocker must come before any modules using capabilities (e.g. IBC) + app.MM.SetOrderBeginBlockers( + // upgrades should be run first + upgradetypes.ModuleName, + capabilitytypes.ModuleName, + ccvminttypes.ModuleName, + ccvdistrtypes.ModuleName, + slashingtypes.ModuleName, + evidencetypes.ModuleName, + ccvstakingtypes.ModuleName, + authtypes.ModuleName, + banktypes.ModuleName, + ccvgovtypes.ModuleName, + crisistypes.ModuleName, + authz.ModuleName, + feegrant.ModuleName, + paramstypes.ModuleName, + vestingtypes.ModuleName, + ibctransfertypes.ModuleName, + ibchost.ModuleName, + ibcconsumertypes.ModuleName, + ) + app.MM.SetOrderEndBlockers( + crisistypes.ModuleName, + ccvgovtypes.ModuleName, + ccvstakingtypes.ModuleName, + capabilitytypes.ModuleName, + authtypes.ModuleName, + banktypes.ModuleName, + ccvdistrtypes.ModuleName, + slashingtypes.ModuleName, + ccvminttypes.ModuleName, + evidencetypes.ModuleName, + authz.ModuleName, + feegrant.ModuleName, + paramstypes.ModuleName, + upgradetypes.ModuleName, + vestingtypes.ModuleName, + ibctransfertypes.ModuleName, + ibchost.ModuleName, + ibcconsumertypes.ModuleName, + ) + + // NOTE: The genutils module must occur after staking so that pools are + // properly initialized with tokens from genesis accounts. + // NOTE: The genutils module must also occur after auth so that it can access the params from auth. + // NOTE: Capability module must occur first so that it can initialize any capabilities + // so that other modules that want to create or claim capabilities afterwards in InitChain + // can do so safely. 
+ app.MM.SetOrderInitGenesis( + capabilitytypes.ModuleName, + authtypes.ModuleName, + banktypes.ModuleName, + ccvdistrtypes.ModuleName, + ccvstakingtypes.ModuleName, + slashingtypes.ModuleName, + ccvgovtypes.ModuleName, + ccvminttypes.ModuleName, + crisistypes.ModuleName, + evidencetypes.ModuleName, + authz.ModuleName, + feegrant.ModuleName, + paramstypes.ModuleName, + upgradetypes.ModuleName, + vestingtypes.ModuleName, + ibchost.ModuleName, + ibctransfertypes.ModuleName, + ibcconsumertypes.ModuleName, + ) + + app.MM.RegisterInvariants(&app.CrisisKeeper) + app.MM.RegisterRoutes(app.Router(), app.QueryRouter(), encodingConfig.Amino) + + app.configurator = module.NewConfigurator(app.appCodec, app.MsgServiceRouter(), app.GRPCQueryRouter()) + app.MM.RegisterServices(app.configurator) + + // create the simulation manager and define the order of the modules for deterministic simulations + // + // NOTE: this is not required apps that don't use the simulator for fuzz testing + // transactions + app.sm = module.NewSimulationManager( + auth.NewAppModule(appCodec, app.AccountKeeper, authsims.RandomGenesisAccounts), + bank.NewAppModule(appCodec, app.BankKeeper, app.AccountKeeper), + capability.NewAppModule(appCodec, *app.CapabilityKeeper), + feegrantmodule.NewAppModule(appCodec, app.AccountKeeper, app.BankKeeper, app.FeeGrantKeeper, app.interfaceRegistry), + ccvgov.NewAppModule(appCodec, app.GovKeeper, app.AccountKeeper, app.BankKeeper), + ccvmint.NewAppModule(appCodec, app.MintKeeper, app.AccountKeeper), + ccvstaking.NewAppModule(appCodec, app.StakingKeeper, app.AccountKeeper, app.BankKeeper), + ccvdistr.NewAppModule(appCodec, app.DistrKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper, authtypes.FeeCollectorName), + slashing.NewAppModule(appCodec, app.SlashingKeeper, app.AccountKeeper, app.BankKeeper, app.StakingKeeper), + params.NewAppModule(app.ParamsKeeper), + evidence.NewAppModule(app.EvidenceKeeper), ibc.NewAppModule(app.IBCKeeper), + authzmodule.NewAppModule(appCodec, app.AuthzKeeper, app.AccountKeeper, app.BankKeeper, app.interfaceRegistry), + transferModule, + ) + + app.sm.RegisterStoreDecoders() + + // initialize stores + app.MountKVStores(keys) + app.MountTransientStores(tkeys) + app.MountMemoryStores(memKeys) + + anteHandler, err := NewAnteHandler( + HandlerOptions{ + HandlerOptions: ante.HandlerOptions{ + AccountKeeper: app.AccountKeeper, + BankKeeper: app.BankKeeper, + FeegrantKeeper: app.FeeGrantKeeper, + SignModeHandler: encodingConfig.TxConfig.SignModeHandler(), + SigGasConsumer: ante.DefaultSigVerificationGasConsumer, + }, + IBCKeeper: app.IBCKeeper, + ConsumerKeeper: app.ConsumerKeeper, + }, + ) + if err != nil { + panic(fmt.Errorf("failed to create AnteHandler: %s", err)) + } + app.SetAnteHandler(anteHandler) + + app.SetInitChainer(app.InitChainer) + app.SetBeginBlocker(app.BeginBlocker) + app.SetEndBlocker(app.EndBlocker) + + app.UpgradeKeeper.SetUpgradeHandler( + upgradeName, + func(ctx sdk.Context, _ upgradetypes.Plan, _ module.VersionMap) (module.VersionMap, error) { + app.IBCKeeper.ConnectionKeeper.SetParams(ctx, ibcconnectiontypes.DefaultParams()) + + fromVM := make(map[string]uint64) + + for moduleName, eachModule := range app.MM.Modules { + fromVM[moduleName] = eachModule.ConsensusVersion() + } + + ctx.Logger().Info("start to run module migrations...") + + return app.MM.RunMigrations(ctx, app.configurator, fromVM) + }, + ) + + upgradeInfo, err := app.UpgradeKeeper.ReadUpgradeInfoFromDisk() + if err != nil { + panic(fmt.Sprintf("failed to read upgrade info from 
disk %s", err)) + } + + if upgradeInfo.Name == upgradeName && !app.UpgradeKeeper.IsSkipHeight(upgradeInfo.Height) { + storeUpgrades := store.StoreUpgrades{} + + // configure store loader that checks if version == upgradeHeight and applies store upgrades + app.SetStoreLoader(upgradetypes.UpgradeStoreLoader(upgradeInfo.Height, &storeUpgrades)) + } + + if loadLatest { + if err := app.LoadLatestVersion(); err != nil { + tmos.Exit(fmt.Sprintf("failed to load latest version: %s", err)) + } + } + + app.ScopedIBCKeeper = scopedIBCKeeper + app.ScopedTransferKeeper = scopedTransferKeeper + app.ScopedIBCConsumerKeeper = scopedIBCConsumerKeeper + + return app +} + +// Name returns the name of the App +func (app *App) Name() string { return app.BaseApp.Name() } + +// BeginBlocker application updates every begin block +func (app *App) BeginBlocker(ctx sdk.Context, req abci.RequestBeginBlock) abci.ResponseBeginBlock { + return app.MM.BeginBlock(ctx, req) +} + +// EndBlocker application updates every end block +func (app *App) EndBlocker(ctx sdk.Context, req abci.RequestEndBlock) abci.ResponseEndBlock { + return app.MM.EndBlock(ctx, req) +} + +// InitChainer application update at chain initialization +func (app *App) InitChainer(ctx sdk.Context, req abci.RequestInitChain) abci.ResponseInitChain { + var genesisState GenesisState + if err := tmjson.Unmarshal(req.AppStateBytes, &genesisState); err != nil { + panic(err) + } + + app.UpgradeKeeper.SetModuleVersionMap(ctx, app.MM.GetVersionMap()) + return app.MM.InitGenesis(ctx, app.appCodec, genesisState) +} + +// LoadHeight loads a particular height +func (app *App) LoadHeight(height int64) error { + return app.LoadVersion(height) +} + +// ModuleAccountAddrs returns all the app's module account addresses. +func (app *App) ModuleAccountAddrs() map[string]bool { + modAccAddrs := make(map[string]bool) + for acc := range maccPerms { + modAccAddrs[authtypes.NewModuleAddress(acc).String()] = true + } + + return modAccAddrs +} + +// LegacyAmino returns App's amino codec. +// +// NOTE: This is solely to be used for testing purposes as it may be desirable +// for modules to register their own custom testing types. +func (app *App) LegacyAmino() *codec.LegacyAmino { + return app.legacyAmino +} + +// AppCodec returns the app codec. +// +// NOTE: This is solely to be used for testing purposes as it may be desirable +// for modules to register their own custom testing types. +func (app *App) AppCodec() codec.Codec { + return app.appCodec +} + +// InterfaceRegistry returns the InterfaceRegistry +func (app *App) InterfaceRegistry() types.InterfaceRegistry { + return app.interfaceRegistry +} + +// GetKey returns the KVStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (app *App) GetKey(storeKey string) *sdk.KVStoreKey { + return app.keys[storeKey] +} + +// GetTKey returns the TransientStoreKey for the provided store key. +// +// NOTE: This is solely to be used for testing purposes. +func (app *App) GetTKey(storeKey string) *sdk.TransientStoreKey { + return app.tkeys[storeKey] +} + +// GetMemKey returns the MemStoreKey for the provided mem key. +// +// NOTE: This is solely used for testing purposes. +func (app *App) GetMemKey(storeKey string) *sdk.MemoryStoreKey { + return app.memKeys[storeKey] +} + +// GetSubspace returns a param subspace for a given module name. +// +// NOTE: This is solely to be used for testing purposes. 
+func (app *App) GetSubspace(moduleName string) paramstypes.Subspace { + subspace, _ := app.ParamsKeeper.GetSubspace(moduleName) + return subspace +} + +// SimulationManager implements the SimulationApp interface +func (app *App) SimulationManager() *module.SimulationManager { + return app.sm +} + +// TestingApp functions + +// GetBaseApp implements the TestingApp interface. +func (app *App) GetBaseApp() *baseapp.BaseApp { + return app.BaseApp +} + +// GetStakingKeeper implements the TestingApp interface. +func (app *App) GetStakingKeeper() ibcclienttypes.StakingKeeper { + return app.ConsumerKeeper +} + +// GetIBCKeeper implements the TestingApp interface. +func (app *App) GetIBCKeeper() *ibckeeper.Keeper { + return app.IBCKeeper +} + +// GetScopedIBCKeeper implements the TestingApp interface. +func (app *App) GetScopedIBCKeeper() capabilitykeeper.ScopedKeeper { + return app.ScopedIBCKeeper +} + +// GetTxConfig implements the TestingApp interface. +func (app *App) GetTxConfig() client.TxConfig { + return cosmoscmd.MakeEncodingConfig(ModuleBasics).TxConfig +} + +// RegisterAPIRoutes registers all application module routes with the provided +// API server. +func (app *App) RegisterAPIRoutes(apiSvr *api.Server, apiConfig config.APIConfig) { + clientCtx := apiSvr.ClientCtx + rpc.RegisterRoutes(clientCtx, apiSvr.Router) + // Register legacy tx routes. + authrest.RegisterTxRoutes(clientCtx, apiSvr.Router) + // Register new tx routes from grpc-gateway. + authtx.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) + // Register new tendermint queries routes from grpc-gateway. + tmservice.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) + + // Register legacy and grpc-gateway routes for all modules. + ModuleBasics.RegisterRESTRoutes(clientCtx, apiSvr.Router) + ModuleBasics.RegisterGRPCGatewayRoutes(clientCtx, apiSvr.GRPCGatewayRouter) + + // register swagger API from root so that other applications can override easily + if apiConfig.Swagger { + RegisterSwaggerAPI(apiSvr.Router) + } +} + +// RegisterTxService implements the Application.RegisterTxService method. +func (app *App) RegisterTxService(clientCtx client.Context) { + authtx.RegisterTxService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.BaseApp.Simulate, app.interfaceRegistry) +} + +// RegisterTendermintService implements the Application.RegisterTendermintService method. 
+func (app *App) RegisterTendermintService(clientCtx client.Context) { + tmservice.RegisterTendermintService(app.BaseApp.GRPCQueryRouter(), clientCtx, app.interfaceRegistry) +} + +// RegisterSwaggerAPI registers swagger route with API Server +func RegisterSwaggerAPI(rtr *mux.Router) { + statikFS, err := fs.New() + if err != nil { + panic(err) + } + + staticServer := http.FileServer(statikFS) + rtr.PathPrefix("/swagger/").Handler(http.StripPrefix("/swagger/", staticServer)) +} + +// GetMaccPerms returns a copy of the module account permissions +func GetMaccPerms() map[string][]string { + dupMaccPerms := make(map[string][]string) + for k, v := range maccPerms { + dupMaccPerms[k] = v + } + return dupMaccPerms +} + +// initParamsKeeper init params keeper and its subspaces +func initParamsKeeper(appCodec codec.BinaryCodec, legacyAmino *codec.LegacyAmino, key, tkey sdk.StoreKey) paramskeeper.Keeper { + paramsKeeper := paramskeeper.NewKeeper(appCodec, legacyAmino, key, tkey) + + paramsKeeper.Subspace(authtypes.ModuleName) + paramsKeeper.Subspace(banktypes.ModuleName) + paramsKeeper.Subspace(ccvstakingtypes.ModuleName) + paramsKeeper.Subspace(ccvminttypes.ModuleName) + paramsKeeper.Subspace(ccvdistrtypes.ModuleName) + paramsKeeper.Subspace(slashingtypes.ModuleName) + paramsKeeper.Subspace(ccvgovtypes.ModuleName).WithKeyTable(ccvgovtypes.ParamKeyTable()) + paramsKeeper.Subspace(crisistypes.ModuleName) + paramsKeeper.Subspace(ibctransfertypes.ModuleName) + paramsKeeper.Subspace(ibchost.ModuleName) + paramsKeeper.Subspace(ibcconsumertypes.ModuleName) + + return paramsKeeper +} diff --git a/app/consumer-democracy/export.go b/app/consumer-democracy/export.go new file mode 100644 index 0000000000..c146ab7ad2 --- /dev/null +++ b/app/consumer-democracy/export.go @@ -0,0 +1,198 @@ +package app + +import ( + "encoding/json" + "fmt" + + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" + + servertypes "github.com/cosmos/cosmos-sdk/server/types" + sdk "github.com/cosmos/cosmos-sdk/types" + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + tmtypes "github.com/tendermint/tendermint/types" +) + +// ExportAppStateAndValidators exports the state of the application for a genesis +// file. +func (app *App) ExportAppStateAndValidators( + forZeroHeight bool, jailAllowedAddrs []string, +) (servertypes.ExportedApp, error) { + + // as if they could withdraw from the start of the next block + ctx := app.NewContext(true, tmproto.Header{Height: app.LastBlockHeight()}) + + // We export at last height + 1, because that's the height at which + // Tendermint will start InitChain. 
+ height := app.LastBlockHeight() + 1 + if forZeroHeight { + height = 0 + app.prepForZeroHeightGenesis(ctx, jailAllowedAddrs) + } + + genState := app.MM.ExportGenesis(ctx, app.appCodec) + appState, err := json.MarshalIndent(genState, "", " ") + if err != nil { + return servertypes.ExportedApp{}, err + } + + validators, err := app.GetValidatorSet(ctx) + if err != nil { + return servertypes.ExportedApp{}, err + } + return servertypes.ExportedApp{ + AppState: appState, + Validators: validators, + Height: height, + ConsensusParams: app.BaseApp.GetConsensusParams(ctx), + }, nil +} + +// prepare for fresh start at zero height +// NOTE zero height genesis is a temporary feature which will be deprecated +// in favour of export at a block height +func (app *App) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) { + // applyAllowedAddrs := false + + // check if there is a allowed address list + // if len(jailAllowedAddrs) > 0 { + // applyAllowedAddrs = true + // } + + // allowedAddrsMap := make(map[string]bool) + + // for _, addr := range jailAllowedAddrs { + // _, err := sdk.ValAddressFromBech32(addr) + // if err != nil { + // log.Fatal(err) + // } + // allowedAddrsMap[addr] = true + // } + + /* Just to be safe, assert the invariants on current state. */ + app.CrisisKeeper.AssertInvariants(ctx) + + /* Handle fee distribution state. */ + + // withdraw all validator commission + // app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + // _, err := app.DistrKeeper.WithdrawValidatorCommission(ctx, val.GetOperator()) + // if err != nil { + // panic(err) + // } + // return false + // }) + + // withdraw all delegator rewards + // dels := app.StakingKeeper.GetAllDelegations(ctx) + // for _, delegation := range dels { + // _, err := app.DistrKeeper.WithdrawDelegationRewards(ctx, delegation.GetDelegatorAddr(), delegation.GetValidatorAddr()) + // if err != nil { + // panic(err) + // } + // } + + // clear validator slash events + // app.DistrKeeper.DeleteAllValidatorSlashEvents(ctx) + + // clear validator historical rewards + // app.DistrKeeper.DeleteAllValidatorHistoricalRewards(ctx) + + // set context height to zero + height := ctx.BlockHeight() + ctx = ctx.WithBlockHeight(0) + + // reinitialize all validators + // app.StakingKeeper.IterateValidators(ctx, func(_ int64, val stakingtypes.ValidatorI) (stop bool) { + // // donate any unwithdrawn outstanding reward fraction tokens to the community pool + // scraps := app.DistrKeeper.GetValidatorOutstandingRewardsCoins(ctx, val.GetOperator()) + // feePool := app.DistrKeeper.GetFeePool(ctx) + // feePool.CommunityPool = feePool.CommunityPool.Add(scraps...) + // app.DistrKeeper.SetFeePool(ctx, feePool) + + // app.DistrKeeper.Hooks().AfterValidatorCreated(ctx, val.GetOperator()) + // return false + // }) + + // reinitialize all delegations + // for _, del := range dels { + // app.DistrKeeper.Hooks().BeforeDelegationCreated(ctx, del.GetDelegatorAddr(), del.GetValidatorAddr()) + // app.DistrKeeper.Hooks().AfterDelegationModified(ctx, del.GetDelegatorAddr(), del.GetValidatorAddr()) + // } + + // reset context height + ctx = ctx.WithBlockHeight(height) + + /* Handle staking state. 
*/ + + // iterate through redelegations, reset creation height + // app.StakingKeeper.IterateRedelegations(ctx, func(_ int64, red stakingtypes.Redelegation) (stop bool) { + // for i := range red.Entries { + // red.Entries[i].CreationHeight = 0 + // } + // app.StakingKeeper.SetRedelegation(ctx, red) + // return false + // }) + + // iterate through unbonding delegations, reset creation height + // app.StakingKeeper.IterateUnbondingDelegations(ctx, func(_ int64, ubd stakingtypes.UnbondingDelegation) (stop bool) { + // for i := range ubd.Entries { + // ubd.Entries[i].CreationHeight = 0 + // } + // app.StakingKeeper.SetUnbondingDelegation(ctx, ubd) + // return false + // }) + + // Iterate through validators by power descending, reset bond heights, and + // update bond intra-tx counters. + // store := ctx.KVStore(app.keys[stakingtypes.StoreKey]) + // iter := sdk.KVStoreReversePrefixIterator(store, stakingtypes.ValidatorsKey) + // counter := int16(0) + + // for ; iter.Valid(); iter.Next() { + // addr := sdk.ValAddress(iter.Key()[1:]) + // validator, found := app.StakingKeeper.GetValidator(ctx, addr) + // if !found { + // panic("expected validator, not found") + // } + + // validator.UnbondingHeight = 0 + // if applyAllowedAddrs && !allowedAddrsMap[addr.String()] { + // validator.Jailed = true + // } + + // app.StakingKeeper.SetValidator(ctx, validator) + // counter++ + // } + + // iter.Close() + + // if _, err := app.StakingKeeper.ApplyAndReturnValidatorSetUpdates(ctx); err != nil { + // panic(err) + // } + + /* Handle slashing state. */ + + // reset start height on signing infos + app.SlashingKeeper.IterateValidatorSigningInfos( + ctx, + func(addr sdk.ConsAddress, info slashingtypes.ValidatorSigningInfo) (stop bool) { + info.StartHeight = 0 + app.SlashingKeeper.SetValidatorSigningInfo(ctx, addr, info) + return false + }, + ) +} + +// GetValidatorSet returns a slice of bonded validators. +func (app *App) GetValidatorSet(ctx sdk.Context) ([]tmtypes.GenesisValidator, error) { + cVals := app.ConsumerKeeper.GetAllCCValidator(ctx) + if len(cVals) == 0 { + return nil, fmt.Errorf("empty validator set") + } + + vals := []tmtypes.GenesisValidator{} + for _, v := range cVals { + vals = append(vals, tmtypes.GenesisValidator{Address: v.Address, Power: v.Power}) + } + return vals, nil +} diff --git a/app/consumer-democracy/genesis.go b/app/consumer-democracy/genesis.go new file mode 100644 index 0000000000..5bf0c1da80 --- /dev/null +++ b/app/consumer-democracy/genesis.go @@ -0,0 +1,21 @@ +package app + +import ( + "encoding/json" + + "github.com/cosmos/cosmos-sdk/codec" +) + +// The genesis state of the blockchain is represented here as a map of raw json +// messages key'd by a identifier string. +// The identifier is used to determine which module genesis information belongs +// to so it may be appropriately routed during init chain. +// Within this application default genesis information is retrieved from +// the ModuleBasicManager which populates json from each BasicModule +// object provided to it during init. +type GenesisState map[string]json.RawMessage + +// NewDefaultGenesisState generates the default state for the application. 
+func NewDefaultGenesisState(cdc codec.JSONCodec) GenesisState { + return ModuleBasics.DefaultGenesis(cdc) +} diff --git a/app/consumer/app.go b/app/consumer/app.go index 7f60c0dc47..7bf07e3851 100644 --- a/app/consumer/app.go +++ b/app/consumer/app.go @@ -347,7 +347,7 @@ func New( app.GetSubspace(slashingtypes.ModuleName), ) - // register slashing module StakingHooks to the consumer keeper + // register slashing module Slashing hooks to the consumer keeper app.ConsumerKeeper = *app.ConsumerKeeper.SetHooks(app.SlashingKeeper.Hooks()) consumerModule := ibcconsumer.NewAppModule(app.ConsumerKeeper) diff --git a/app/consumer/export.go b/app/consumer/export.go index 77ba45dad8..297072cf1c 100644 --- a/app/consumer/export.go +++ b/app/consumer/export.go @@ -12,8 +12,8 @@ import ( tmtypes "github.com/tendermint/tendermint/types" ) -// ExportAppStateAndValidators exports the state of the application for a genesis -// file. +// ExportAppStateAndValidators implements the simapp app interface +// by exporting the state of the application func (app *App) ExportAppStateAndValidators( forZeroHeight bool, jailAllowedAddrs []string, ) (servertypes.ExportedApp, error) { @@ -39,17 +39,19 @@ func (app *App) ExportAppStateAndValidators( if err != nil { return servertypes.ExportedApp{}, err } + return servertypes.ExportedApp{ AppState: appState, - Validators: validators, Height: height, + Validators: validators, ConsensusParams: app.BaseApp.GetConsensusParams(ctx), }, nil } // prepare for fresh start at zero height // NOTE zero height genesis is a temporary feature which will be deprecated -// in favour of export at a block height +// +// in favour of export at a block height func (app *App) prepForZeroHeightGenesis(ctx sdk.Context, jailAllowedAddrs []string) { /* Just to be safe, assert the invariants on current state. 
*/ diff --git a/cmd/interchain-security-cdd/main.go b/cmd/interchain-security-cdd/main.go new file mode 100644 index 0000000000..6b9bbffe60 --- /dev/null +++ b/cmd/interchain-security-cdd/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "os" + + "github.com/cosmos/cosmos-sdk/server" + svrcmd "github.com/cosmos/cosmos-sdk/server/cmd" + app "github.com/cosmos/interchain-security/app/consumer-democracy" + "github.com/tendermint/spm/cosmoscmd" +) + +func main() { + rootCmd, _ := cosmoscmd.NewRootCmd( + app.AppName, + app.AccountAddressPrefix, + app.DefaultNodeHome, + app.AppName, + app.ModuleBasics, + app.New, + // this line is used by starport scaffolding # root/arguments + ) + + if err := svrcmd.Execute(rootCmd, app.DefaultNodeHome); err != nil { + switch e := err.(type) { + case server.ErrorCode: + os.Exit(e.Code) + + default: + os.Exit(1) + } + } +} diff --git a/docs/interchain-security/proto-docs.md b/docs/interchain-security/proto-docs.md index 41782ce80b..1c974e7106 100644 --- a/docs/interchain-security/proto-docs.md +++ b/docs/interchain-security/proto-docs.md @@ -22,7 +22,7 @@ - [UnbondingSequence](#interchain_security.ccv.consumer.v1.UnbondingSequence) - [interchain_security/ccv/provider/v1/provider.proto](#interchain_security/ccv/provider/v1/provider.proto) - - [CreateConsumerChainProposal](#interchain_security.ccv.provider.v1.CreateConsumerChainProposal) + - [ConsumerAdditionProposal](#interchain_security.ccv.provider.v1.ConsumerAdditionProposal) - [HandshakeMetadata](#interchain_security.ccv.provider.v1.HandshakeMetadata) - [Params](#interchain_security.ccv.provider.v1.Params) @@ -278,10 +278,10 @@ UnbondingSequence defines the genesis information for each unbonding packet sequ - + -### CreateConsumerChainProposal -CreateConsumerChainProposal is a governance proposal on the provider chain to spawn a new consumer chain. +### ConsumerAdditionProposal +ConsumerAdditionProposal is a governance proposal on the provider chain to spawn a new consumer chain. If it passes, then all validators on the provider chain are expected to validate the consumer chain at spawn time or get slashed. It is recommended that spawn time occurs after the proposal end time. diff --git a/docs/keymap/README.md b/docs/keymap/README.md new file mode 100644 index 0000000000..dc8d896b67 --- /dev/null +++ b/docs/keymap/README.md @@ -0,0 +1,116 @@ +# KeyMap + +KeyMap is the name of the feature that allows validator operators to use different consensus keys for each consumer chain validator node that they operate. + +Validators can improve their security by using different consensus keys for each chain. That way, different teams in an organization can operate a subset (can be size 1) of the total number of consumer chains associated to a provider chain. If one key leaks the other keys will not be at risk. It is possible to change the keys at any time by submitting a transaction. + +## Overview + +The KeyMap feature is available via a provider chain API (transactions and queries). The provider chain validator operator submits a mapping transaction to the provider chain with a consumer chain ID and desired consensus key as parameters. The IBC protocol used by Interchain Security takes care of forwarding the mapping to the specified consumer chain. When the consumer chain receives the key, it will immediately start using it with tendermint. + +It is possible to start validating a consumer chain with the same key as used for the provider. 
It is also possible to specify another key to use when joining the validator set. Moreover, it is possible to change the key in use at any time, and multiple times, with some minor restrictions. + +## API (High level) + +**Writes** + +```go +// Associates a new consumer key as consensus key on the consumer chain +// for the validator on the provider chain associated to the provider key. +SetConsumerKey(providerKey, consumerChainID, consumerKey) { +} +``` + +**Reads** + + +```go +// Returns the last consumerKey associated to the provider key and +// the consumer chain by a call to SetConsumerKey. +GetConsumerKey(providerKey, consumerChainID) { +} +``` + +```go +// Returns the last providerKey associated to consumerKey and the consumer +// chain by a call to SetConsumerKey. +GetProviderKey(consumerKey, consumerChainID) { +} +``` + +## API (Details) + +**Writes** + +```go +// Attempts to associate a new consumer key consumerKey on the consumer chain +// specified by consumerChainID to the validator on the provider chain +// specified by providerKey. +// If the attempt succeeds, the consumer chain will start using consumerKey as +// its consensus key from the earliest block at which it receives the update +// via IBC. +// The attempt can fail if any of the arguments are invalid, or if either chain +// or the IBC connection is faulty. +// The attempt can additionally fail if the key consumerKey was already used +// for a mapping with the KeyMap feature too recently in the past. This is +// to prevent attacks. In particular, once a key is used in a KeyMap association, +// that key is no longer usable for another association until the first +// association is cancelled, and an acknowledgement of the cancellation is +// received from the consumer chain and processed on the provider chain. +SetConsumerKey(providerKey, consumerChainID, consumerKey) { + // TODO: signatures, types +} +``` + +**Reads** + + +```go +// Returns the last consumerKey associated to the provider key and +// the consumer chain by a call to SetConsumerKey. +// TODO: more detail needed? +GetConsumerKey(providerKey, consumerChainID) { +} +``` + +```go +// Returns the last providerKey associated to consumerKey and the consumer +// chain by a call to SetConsumerKey. +// TODO: more detail needed? +GetProviderKey(consumerKey, consumerChainID) { +} +``` + +### External Properties - Interchain Security + +KeyMap has some properties relevant to the external user: + +1. Validator Set Replication\ +When the Interchain Security property [Validator Set Replication](https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/system_model_and_properties.md#system-properties) holds for an implementation without KeyMap, then the property holds when KeyMap is used. +2. Slashable Consumer Misbehavior\ +When the Interchain Security property [Slashable Consumer Misbehavior](https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/system_model_and_properties.md#system-properties) holds for an implementation without KeyMap, then the property holds when KeyMap is used. + +In fact, all Interchain Security properties still hold when KeyMap is used; the above are just the most relevant.
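+
+To make the API above concrete, here is a minimal, self-contained sketch of the two indices the provider chain needs per consumer chain: a forward index (provider key to consumer key) and a reverse index (consumer key to provider key). It is illustration only, not the actual x/ccv implementation: the `KeyMap` type, the `ProviderKey`/`ConsumerKey`/`ChainID` aliases, and the in-memory maps are hypothetical stand-ins, and IBC delivery, signature checks, and the pruning/acknowledgement logic that governs when a key becomes reusable are omitted.
+
+```go
+package main
+
+import (
+	"errors"
+	"fmt"
+)
+
+// Hypothetical stand-ins for the real key and chain ID types.
+type (
+	ProviderKey string
+	ConsumerKey string
+	ChainID     string
+)
+
+// KeyMap keeps a forward and a reverse index per consumer chain so that
+// both GetConsumerKey and GetProviderKey are O(1) lookups.
+type KeyMap struct {
+	toConsumer map[ChainID]map[ProviderKey]ConsumerKey
+	toProvider map[ChainID]map[ConsumerKey]ProviderKey
+}
+
+func NewKeyMap() *KeyMap {
+	return &KeyMap{
+		toConsumer: map[ChainID]map[ProviderKey]ConsumerKey{},
+		toProvider: map[ChainID]map[ConsumerKey]ProviderKey{},
+	}
+}
+
+// SetConsumerKey rejects a consumer key that is still associated to another
+// provider key on the same chain, mirroring the reuse restriction above
+// (without the cancellation/acknowledgement round trip).
+func (k *KeyMap) SetConsumerKey(pk ProviderKey, chain ChainID, ck ConsumerKey) error {
+	if k.toConsumer[chain] == nil {
+		k.toConsumer[chain] = map[ProviderKey]ConsumerKey{}
+		k.toProvider[chain] = map[ConsumerKey]ProviderKey{}
+	}
+	if owner, used := k.toProvider[chain][ck]; used && owner != pk {
+		return errors.New("consumer key already in use on this chain")
+	}
+	// Drop this validator's previous association, if any.
+	if old, ok := k.toConsumer[chain][pk]; ok {
+		delete(k.toProvider[chain], old)
+	}
+	k.toConsumer[chain][pk] = ck
+	k.toProvider[chain][ck] = pk
+	return nil
+}
+
+func (k *KeyMap) GetConsumerKey(pk ProviderKey, chain ChainID) (ConsumerKey, bool) {
+	ck, ok := k.toConsumer[chain][pk]
+	return ck, ok
+}
+
+func (k *KeyMap) GetProviderKey(ck ConsumerKey, chain ChainID) (ProviderKey, bool) {
+	pk, ok := k.toProvider[chain][ck]
+	return pk, ok
+}
+
+func main() {
+	km := NewKeyMap()
+	_ = km.SetConsumerKey("provider-val-A", "consumer-1", "consumer-key-1")
+	ck, _ := km.GetConsumerKey("provider-val-A", "consumer-1")
+	pk, _ := km.GetProviderKey("consumer-key-1", "consumer-1")
+	fmt.Println(ck, pk) // consumer-key-1 provider-val-A
+}
+```
+
+The reverse index is the security-relevant part: when a consumer chain later reports misbehavior identified by a consumer consensus key, the provider must be able to map it back to the provider key to be punished.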
+ +### External Properties - timeliness + +When a call to `SetConsumerKey` succeeds for a given `(providerKey, consumerChainID)` tuple at block height `hp0`, and is not followed by a subsquent call for the same tuple before or during a height `hp1` (`hp0 <= hp1`), and at `hp1` a validator set update packet is committed at the provider chain, then at the next earliest height `hc2` on the consumer chain that the packet is received, the `consumerKey` is passed as consensus key to tendermint. Thus tendermint will expect a signature from `consumerKey` from height `hc2 + 1`. + +TODO: check, test, correct, guarantee and formalize this. + +### Internal properties + +The KeyMap implementation satisfies a number of internal properties, which are used to guarantee the external properties. These are only relevant to system internals. They are, briefly: + +1. Validator Set Replication\ +'All consumer validator sets are some earlier provider validator set' +2. Queries\ +'It is always possible to query the provider key for a given consumer key, when the consumer can still make slash requests' +3. Pruning\ +'When the pruning method is used correctly, the internal state of the data structure does not grow unboundedly' + +Details can be found in x/ccv/provider/keeper/keymap_core_test.go. TODO: link? + +![Dummy](./diagrams/dummy.png) + +footer placeholder \ No newline at end of file diff --git a/docs/keymap/diagrams/dummy.excalidraw b/docs/keymap/diagrams/dummy.excalidraw new file mode 100644 index 0000000000..31a8e76861 --- /dev/null +++ b/docs/keymap/diagrams/dummy.excalidraw @@ -0,0 +1,284 @@ +{ + "type": "excalidraw", + "version": 2, + "source": "https://excalidraw.com", + "elements": [ + { + "id": "jnw1YontONnXqPoVC-D_5", + "type": "rectangle", + "x": -154.92880673737443, + "y": 3528.5756797513577, + "width": 921.0283500395554, + "height": 472.3939033586794, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 2, + "opacity": 100, + "groupIds": [], + "strokeSharpness": "sharp", + "seed": 1440774261, + "version": 74, + "versionNonce": 1312719803, + "isDeleted": false, + "boundElements": null, + "updated": 1665008710945, + "link": null, + "locked": false + }, + { + "id": "-lLVqybaoV_sFz4FYz-Q1", + "type": "freedraw", + "x": 127.38944319887776, + "y": 3784.339242812418, + "width": 486.37005434562263, + "height": 216.6303402976191, + "angle": 0, + "strokeColor": "#000000", + "backgroundColor": "transparent", + "fillStyle": "solid", + "strokeWidth": 2, + "strokeStyle": "solid", + "roughness": 2, + "opacity": 100, + "groupIds": [], + "strokeSharpness": "round", + "seed": 497609077, + "version": 54, + "versionNonce": 1332451483, + "isDeleted": false, + "boundElements": null, + "updated": 1665008713037, + "link": null, + "locked": false, + "points": [ + [ + 0, + 0 + ], + [ + -5.590460394777324, + -1.3976150986945868 + ], + [ + -9.783305690860288, + -4.192845296082851 + ], + [ + -12.578535888248894, + -11.18092078955442 + ], + [ + -12.578535888248894, + -16.771381184331858 + ], + [ + -8.385690592165929, + -26.554686875192147 + ], + [ + -1.3976150986943594, + -39.133222763441154 + ], + [ + 8.385690592165815, + -55.90460394777256 + ], + [ + 19.566611381720463, + -79.66406062557598 + ], + [ + 33.5427623686636, + -106.21874750076813 + ], + [ + 51.711758651689706, + -129.97820417857156 + ], + [ + 82.4592908229647, + -149.54481556029214 + ], + [ + 121.59251358640563, + -155.13527595506912 + ], + [ + 
156.5328910537636, + -149.54481556029214 + ], + [ + 190.0756534224272, + -129.97820417857156 + ], + [ + 215.23272519892498, + -99.23067200729656 + ], + [ + 225.01603088978504, + -58.699834145161276 + ], + [ + 226.4136459884794, + -20.96422648041471 + ], + [ + 232.0041063832566, + 15.373766085637271 + ], + [ + 240.38979697542254, + 43.326068059524005 + ], + [ + 255.76356306106004, + 61.495064342549995 + ], + [ + 273.93255934408626, + 60.09744924385541 + ], + [ + 297.6920160218897, + 47.518913355606855 + ], + [ + 331.2347783905532, + 25.15707177649756 + ], + [ + 367.5727709566056, + -6.98807549347157 + ], + [ + 405.3083786213522, + -58.699834145161276 + ], + [ + 419.2845296082953, + -97.83305690860243 + ], + [ + 415.09168431221224, + -125.78535888248871 + ], + [ + 394.1274578317975, + -142.55674006682057 + ], + [ + 346.6085444761907, + -153.737660856375 + ], + [ + 294.89678582450097, + -152.34004575768085 + ], + [ + 238.99218187672818, + -129.97820417857156 + ], + [ + 183.08757792895562, + -93.64021161251912 + ], + [ + 122.99012868509988, + -53.10937375038429 + ], + [ + 76.86883042818738, + -27.95230197388628 + ], + [ + 16.771381184331744, + -22.361841579109296 + ], + [ + -29.34991707258075, + -40.530837862135286 + ], + [ + -51.71175865168982, + -61.495064342549995 + ], + [ + -64.2902945399386, + -85.25452102035342 + ], + [ + -67.08552473732732, + -113.2068229942397 + ], + [ + -53.109373750384066, + -141.15912496812598 + ], + [ + -15.373766085637499, + -150.94243065898627 + ], + [ + 29.349917072580638, + -143.9543551655147 + ], + [ + 85.25452102035331, + -117.39966829032255 + ], + [ + 134.1710494746544, + -76.86883042818772 + ], + [ + 155.13527595506923, + -48.91652845430099 + ], + [ + 163.52096654723516, + -18.16899628302599 + ], + [ + 160.72573634984644, + -4.192845296082851 + ], + [ + 149.5448155602919, + 2.795230197388719 + ], + [ + 135.56866457334877, + 6.98807549347157 + ], + [ + 125.7853588824886, + 8.385690592165702 + ], + [ + 121.59251358640563, + 8.385690592165702 + ], + [ + 121.59251358640563, + 8.385690592165702 + ] + ], + "pressures": [], + "simulatePressure": true, + "lastCommittedPoint": [ + 121.59251358640563, + 8.385690592165702 + ] + } + ], + "appState": { + "gridSize": null, + "viewBackgroundColor": "#ffffff" + }, + "files": {} +} \ No newline at end of file diff --git a/docs/keymap/diagrams/dummy.png b/docs/keymap/diagrams/dummy.png new file mode 100644 index 0000000000..a5efed1abd Binary files /dev/null and b/docs/keymap/diagrams/dummy.png differ diff --git a/docs/quality_assurance.md b/docs/quality_assurance.md index 0a95c47aad..aeef7c1af9 100644 --- a/docs/quality_assurance.md +++ b/docs/quality_assurance.md @@ -27,9 +27,9 @@ For an overview of the Interchain Security workflow, have a look at [the diagram | ID | Concern | Code Review | Automatic Tools | Unit Testing | | -- | ------- | ----------- | --------------- | ------------ | | 1.01 | Unexpected panics | `Scheduled` | `??` | `??` | -| 1.02 | Handling errors | `Scheduled` | `gosec` | `??` | +| 1.02 | Handling errors | `Scheduled` | `gosec` | `Partial coverage` | | 1.03 | Accessing store (setters, getters, iterators) | `Scheduled` | `??` | `Partial coverage` | -| 1.04 | Serialization / deserialization | `Scheduled` | `??` | `??` | +| 1.04 | Serialization / deserialization | `Scheduled` | `??` | `Partial coverage` | | 1.05 | Storage leaks | `Scheduled` | `NA` | `??` | ### Integration with IBC @@ -41,28 +41,28 @@ IBC packets: - MaturedVSCPacket - SlashPacketData -| ID | Concern | Code Review | Unit Testing | Diff. 
testing | Testnet | -| -- | ------- | ----------- | ------------ | ------------- | ------- | -| 2.01 | Create IBC clients | `Scheduled` (ibc-go team) | `Done` | `Future work` | `Scheduled` | -| 2.02 | Getting consumer `UnbondingPeriod` from IBC client | `Scheduled` (ibc-go team) | `??` | `NA` | `NA` | -| 2.03 | Create CCV channel (handshake) | `Scheduled` (ibc-go team) | `Done` | `Future work` | `Scheduled` | -| 2.04 | Sending IBC packets
- see `x/ccv/utils/utils.go:SendIBCPacket()` | `Scheduled` (ibc-go team) | `Done` | `Done` | `Scheduled` | -| 2.05 | Handling acknowledgments | `Scheduled` (ibc-go team) | `Partial coverage` | `Scheduled` | `Scheduled` | -| 2.06 | Handling timeouts | `Scheduled` (ibc-go team) | `Partial coverage` | `Future work` | `Scheduled` | -| 2.07 | **Handling IBC client expiration** | `Scheduled` (ibc-go team)
high priority | `??` | `Future work` | `Scheduled` |
-| 2.08 | ICS-20 channel creation | `Scheduled` (ibc-go team) | `??` | `Future work` | `Scheduled` |
-| 2.09 | ICS-20 transfer | `Scheduled` (ibc-go team) | `??` | `NA` | `Scheduled` |
-| 2.10 | Changes in IBC-GO testing suite | `Scheduled` (ibc-go team) | `NA` | `Partial coverage` | `NA` |
+| ID | Concern | Code Review | Unit Testing | E2E Testing | Diff. Testing | Testnet |
+| -- | ------- | ----------- | ------------ | ----------- | ------------- | ------- |
+| 2.01 | Create IBC clients | `Scheduled` (ibc-go team) | `Done` | `??` | `Future work` | `Scheduled` |
+| 2.02 | Getting consumer `UnbondingPeriod` from IBC client | `Scheduled` (ibc-go team) | `Done`, see `TestUnbondingTime` | `??` | `NA` | `NA` |
+| 2.03 | Create CCV channel (handshake) | `Scheduled` (ibc-go team) | `Done` | `NA` | `Future work` | `Scheduled` |
+| 2.04 | Sending IBC packets
- see `x/ccv/utils/utils.go:SendIBCPacket()` | `Scheduled` (ibc-go team) | `??` | `Done` | `Done` | `Scheduled` |
+| 2.05 | Handling acknowledgments | `Scheduled` (ibc-go team) | `Partial coverage` | `Partial coverage` | `Scheduled` | `Scheduled` |
+| 2.06 | Handling timeouts | `Scheduled` (ibc-go team) | `??` | `??` | `Future work` | `Scheduled` |
+| 2.07 | **Handling IBC client expiration** | `Scheduled` (ibc-go team)
high priority | `??` | `??` | `Future work` | `Scheduled` | +| 2.08 | ICS-20 channel creation | `Scheduled` (ibc-go team) | `??` | `??` |`Future work` | `Scheduled` | +| 2.09 | ICS-20 transfer | `Scheduled` (ibc-go team) | `??` | `??` | `NA` | `Scheduled` | +| 2.10 | Changes in IBC-GO testing suite | `Scheduled` (ibc-go team) | `NA` | `??` | `Partial coverage` | `NA` | ### Integration with Cosmos SDK A prerequisite of the code review is to open a PR with all the [SDK changes](https://github.com/cosmos/cosmos-sdk/tree/interchain-security-rebase) needed by Interchain Security. -| ID | Concern | Code Review | Unit Testing | Diff. testing | Testnet | -| -- | ------- | ----------- | ------------ | ------------- | ------- | -| 3.01 | Changes to staking module | `Scheduled` (sdk team) | `Partial coverage`
see [unbonding_test.go](../x/ccv/provider/unbonding_test.go)
redelegation and validator unbonding missing | `Partial coverage` | `Scheduled` | -| 3.02 | Changes to slashing module | `Scheduled` (sdk team) | `Done`
see [TestValidatorDowntime](../x/ccv/consumer/keeper/keeper_test.go#L345)
| `NA` | `Scheduled` | -| 3.03 | Changes to evidence module | `Scheduled` (sdk team) | `Done`
see [TestValidatorDoubleSigning](../x/ccv/consumer/keeper/keeper_test.go#L427)
| `NA` | `Scheduled` | +| ID | Concern | Code Review | Unit Testing | E2E Testing | Diff. Testing | Testnet | +| -- | ------- | ----------- | ------------ | ----------- | ------------- | ------- | +| 3.01 | Changes to staking module | `Scheduled` (sdk team) | `??` | `Partial coverage`
see [unbonding_test.go](../tests/e2e/unbonding_test.go)
redelegation could be expanded, validator unbonding missing | `Partial coverage` | `Scheduled` | +| 3.02 | Changes to slashing module | `Scheduled` (sdk team) | `??` | `Done`
see [TestValidatorDowntime](../tests/e2e/slashing_test.go#L502)
| `NA` | `Scheduled` | +| 3.03 | Changes to evidence module | `Scheduled` (sdk team) | `??` | `Done`
see [TestValidatorDoubleSigning](../tests/e2e/slashing_test.go#L584)
| `NA` | `Scheduled` | ### Provider Chain Correctness @@ -71,21 +71,21 @@ The main concern addressed in this section is the correctness of the provider ch - one single consumer chain; - multiple consumer chains. -| ID | Concern | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit | -| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- | -| 4.01 | Liveness of undelegations
- unbonding delegation entries are eventually removed from `UnbondingDelegation` | `Scheduled` | `Done`
see [unbonding_test.go](../x/ccv/provider/unbonding_test.go) | `Done` | `Scheduled` | `Scheduled` | -| 4.02 | Liveness of redelegations
- redelegations entries are eventually removed from `Redelegations` | `Scheduled` | `Scheduled` | `Scheduled` | `Scheduled` | `Scheduled` | -| 4.03 | Liveness of validator unbondings
- unbonding validators with no delegations are eventually removed from `Validators` | `Scheduled` | `Scheduled` | `Done` | `Scheduled` | `Scheduled` | -| 4.04 | Unbonding operations (undelegations, redelegations, validator unbondings) should eventually complete even if the CCV channel is never established (due to error)
- expected outcome: the pending VSC packets eventually timeout, which leads to the consumer chain removal | `Scheduled` | `??` | `Future work` | `Scheduled` | `Scheduled`
high priority | -| 4.05 | Unbonding operations (undelegations, redelegations, validator unbondings) should eventually complete even if one of the clients expire
- expected outcome: the pending VSC packets eventually timeout, which leads to the consumer chain removal | `Scheduled` | `??` | `Future work` | `Scheduled` | `Scheduled`
high priority | -| 4.06 | A validator cannot get slashed more than once for double signing, regardless of how many times it double signs on different chains (consumers or provider) | `Scheduled` | `Done`
see [TestHandleSlashPacketErrors](../x/provider/keeper_test.go#L329) | `Done` | `Scheduled` | `NA` | -| 4.07 | A validator cannot get slashed multiple times for downtime on the same consumer chain without requesting to `Unjail` itself on the provider chain in between | `Scheduled` | `Done`
see [TestSendSlashPacket](../x/consumer/keeper_test.go#489)| `Partial coverage` | `Scheduled` | `NA` | -| 4.08 | A validator can be slashed multiple times for downtime on different chains | `Scheduled` | `Future work` | `NA` | `Scheduled` | `NA` | -| 4.09 | The provider chain can easily be restarted with IS enabled
- `ExportGenesis` & `InitGenesis` | `Scheduled` | `Future work` | `Future work` | `Scheduled` | `NA` | -| 4.10 | The provider chain's correctness is not affected by a consumer chain shutting down | `Scheduled` | `Future work` | `Future work` | `Scheduled` | `NA` | -| 4.11 | The provider chain can graciously handle a CCV packet timing out (without shuting down)
- expected outcome: consumer chain shuts down and its state in provider CCV module is removed | `Scheduled` | `Future work` | `Future work` | `Scheduled` | `NA` | -| 4.12 | The provider chain can graciously handle a `StopConsumerChainProposal`
- expected outcome: consumer chain shuts down and its state in provider CCV module is removed | `Scheduled` | `Done`
see [stop_consumer_test.go](../x/ccv/provider/stop_consumer_test.go) | `Future work` | `Scheduled` | `NA` | -| 4.13 | The provider chain can graciously handle a `SpawnConsumerChainProposal`
- expected outcome: a consumer chain is registered and a client is created | `Scheduled` |`Done`
see [TestCreateConsumerChainProposal](../x/ccv/provider/keeper/proposal_test.go#L44) | `Future work` | `Scheduled` | `NA` | +| ID | Concern | Code Review | Unit | E2e | Diff. Testing | Testnet | Protocol audit | +| -- | ------- | ----------- | ---- | --- | ------------- | ------- | -------------- | +| 4.01 | Liveness of undelegations
- unbonding delegation entries are eventually removed from `UnbondingDelegation` | `Scheduled` | `NA` | `Done`
see [here](../tests/e2e/unbonding_test.go) | `Done` | `Scheduled` | `Scheduled` | +| 4.02 | Liveness of redelegations
- redelegation entries are eventually removed from `Redelegations` | `NA` | `Scheduled` | `Scheduled` | `Scheduled` | `Scheduled` | `Scheduled` |
+| 4.03 | Liveness of validator unbondings
- unbonding validators with no delegations are eventually removed from `Validators` | `NA` | `Scheduled` | `Scheduled` | `Done` | `Scheduled` | `Scheduled` | +| 4.04 | Unbonding operations (undelegations, redelegations, validator unbondings) should eventually complete even if the CCV channel is never established (due to error)
- expected outcome: the pending VSC packets eventually time out, which leads to the consumer chain removal | `Scheduled` | `NA` | `??` | `Future work` | `Scheduled` | `Scheduled`
high priority |
+| 4.05 | Unbonding operations (undelegations, redelegations, validator unbondings) should eventually complete even if one of the clients expires
- expected outcome: the pending VSC packets eventually time out, which leads to the consumer chain removal | `Scheduled` | `??` | `??` | `Future work` | `Scheduled` | `Scheduled`
high priority |
+| 4.06 | A validator cannot get slashed more than once for double signing, regardless of how many times it double signs on different chains (consumers or provider) | `Scheduled` | `NA` | `Done`
see [here](../tests/e2e/slashing_test.go#L317) | `Done` | `Scheduled` | `NA` | +| 4.07 | A validator cannot get slashed multiple times for downtime on the same consumer chain without requesting to `Unjail` itself on the provider chain in between | `Scheduled` | `NA` | `Done`
see [here](../tests/e2e/slashing_test.go#L642) | `Partial coverage` | `Scheduled` | `NA` |
+| 4.08 | A validator can be slashed multiple times for downtime on different chains | `Scheduled` | `NA` | `Future work` | `NA` | `Scheduled` | `NA` |
+| 4.09 | The provider chain can easily be restarted with IS enabled
- `ExportGenesis` & `InitGenesis` | `Scheduled` | `??` | `Future work` | `Future work` | `Scheduled` | `NA` |
+| 4.10 | The provider chain's correctness is not affected by a consumer chain shutting down | `Scheduled` | `NA` | `Future work` | `Future work` | `Scheduled` | `NA` |
+| 4.11 | The provider chain can gracefully handle a CCV packet timing out (without shutting down)
- expected outcome: consumer chain shuts down and its state in provider CCV module is removed | `Scheduled` | `??` | `Future work` | `Future work` | `Scheduled` | `NA` |
+| 4.12 | The provider chain can gracefully handle a `ConsumerRemovalProposal`
- expected outcome: consumer chain shuts down and its state in provider CCV module is removed | `Scheduled` | `Done`
see [here](../x/ccv/provider/keeper/proposal_test.go#L313) | `NA` | `Future work` | `Scheduled` | `NA` |
+| 4.13 | The provider chain can gracefully handle a `ConsumerAdditionProposal`
- expected outcome: a consumer chain is registered and a client is created | `Scheduled` | `Done`
see [here](../x/ccv/provider/keeper/proposal_test.go#L31) | `NA` | `Future work` | `Scheduled` | `NA` | ### Interchain Security Protocol Correctness @@ -101,41 +101,41 @@ In addition, the implementation MUST guarantee the following [system properties] --- -| ID | Concern re. *Channel Uniqueness* | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit | -| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- | -| 5.01 | `SpawnConsumerChainProposal(chainId)` should fail if a consumer with `chainId` is already registered | `Scheduled` | `??` | `NA` | `Scheduled` | `Scheduled`
high priority | -| 5.02 | The channel handshake for a consumer with `chainId` should fail if there is already an established CCV channel for `chainId` | `Scheduled` | `??` | `NA` | `Scheduled` | `Scheduled`
high priority |
-| 5.03 | *Channel Uniqueness* should hold even if a consumer chain restarts | `Scheduled` | `??` | `NA` | `Scheduled` | `NA` |
-| 5.04 | *Channel Uniqueness* should hold even when a client expires | `Scheduled` | `??` | `NA` | `Scheduled` | `NA` |
+| ID | Concern re. *Channel Uniqueness* | Code Review | Unit Testing | E2e Testing | Diff. Testing | Testnet | Protocol audit |
+| -- | -------------------------------- | ----------- | ------------ | ----------- | ------------- | ------- | -------------- |
+| 5.01 | `HandleConsumerAdditionProposal()` should fail if a consumer with `chainId` is already registered | `Scheduled` | `Done`, see [here](../x/ccv/provider/keeper/proposal_test.go#L138) | `??` | `NA` | `Scheduled` | `Scheduled`
high priority |
+| 5.02 | The channel handshake for a consumer with `chainId` should fail if there is already an established CCV channel for `chainId` | `Scheduled` | `Done`, see [here](../x/ccv/provider/ibc_module_test.go#L103) and [here](../x/ccv/consumer/ibc_module_test.go#L59) | `??` | `NA` | `Scheduled` | `Scheduled`
high priority | +| 5.03 | *Channel Uniqueness* should hold even if a consumer chain restarts | `Scheduled` | `NA` | `??` | `NA` | `Scheduled` | `NA` | +| 5.04 | *Channel Uniqueness* should hold even when a client expires | `Scheduled` | `??` | `NA` | `NA` | `Scheduled` | `NA` | --- -| ID | Concern re. *Validator Set Replication* | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit | -| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- | -| 6.01 | Every validator set on any consumer chain MUST either be or have been a validator set on the provider chain. | `Scheduled` | `NA` | `Done` | `Scheduled` | `Scheduled` | -| 6.02 | Any update in the power of a validator `val` on the provider, as a result of
- (increase) `Delegate()` / `Redelegate()` to `val`
- (increase) `val` joining the provider validator set
- (decrease) `Undelegate()` / `Redelegate()` from `val`
- (decrease) `Slash(val)`
- (decrease) `val` leaving the provider validator set
MUST be present in a `ValidatorSetChangePacket` that is sent to all registered consumer chains | `Scheduled` | `NA` | `Done` | `Scheduled` | `Scheduled` | -| 6.03 | Every consumer chain receives the same sequence of `ValidatorSetChangePacket`s in the same order. | `Scheduled` | `NA` | `NA` | `Scheduled` | `Scheduled`
high priority | +| ID | Concern re. *Validator Set Replication* | Code Review | Unit Testing | E2e Testing | Diff. testing | Testnet | Protocol audit | +| -- | --------------------------------------- | ----------- | ------------ | ----------- | ------------- | ------- | -------------- | +| 6.01 | Every validator set on any consumer chain MUST either be or have been a validator set on the provider chain. | `Scheduled` | `NA` | `NA` | `Done` | `Scheduled` | `Scheduled` | +| 6.02 | Any update in the power of a validator `val` on the provider, as a result of
- (increase) `Delegate()` / `Redelegate()` to `val`
- (increase) `val` joining the provider validator set
- (decrease) `Undelegate()` / `Redelegate()` from `val`
- (decrease) `Slash(val)`
- (decrease) `val` leaving the provider validator set
MUST be present in a `ValidatorSetChangePacket` that is sent to all registered consumer chains | `Scheduled` | `NA` | `NA` | `Done` | `Scheduled` | `Scheduled` | +| 6.03 | Every consumer chain receives the same sequence of `ValidatorSetChangePacket`s in the same order. | `Scheduled` | `NA` | `NA` | `NA` | `Scheduled` | `Scheduled`
high priority | --- -| ID | Concern re. *Bond-Based Consumer Voting Power* | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit | -| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- | -| 7.01 | For every `ValidatorSetChangePacket` received by a consumer chain at time `t`, a `MaturedVSCPacket` is sent back to the provider in the first block with a timestamp `>= t + UnbondingPeriod` | `Scheduled` | `Scheduled` | `Done` | `Scheduled` | `Scheduled` | -| 7.02 | If an unbonding operation resulted in a `ValidatorSetChangePacket` sent to all registered consumer chains, then it cannot complete before receiving matching `MaturedVSCPacket`s from these consumer chains (unless some of these consumer chains are removed) | `Scheduled` | `Scheduled` | `Done` | `Scheduled` | `Scheduled` | +| ID | Concern re. *Bond-Based Consumer Voting Power* | Code Review | Unit Testing | E2e Testing | Diff. Testing | Testnet | Protocol audit | +| -- | ---------------------------------------------- | ----------- | ------------ | ----------- | ------------- | ------- | -------------- | +| 7.01 | For every `ValidatorSetChangePacket` received by a consumer chain at time `t`, a `MaturedVSCPacket` is sent back to the provider in the first block with a timestamp `>= t + UnbondingPeriod` | `Scheduled` | `??` | `Scheduled` | `Done` | `Scheduled` | `Scheduled` | +| 7.02 | If an unbonding operation resulted in a `ValidatorSetChangePacket` sent to all registered consumer chains, then it cannot complete before receiving matching `MaturedVSCPacket`s from these consumer chains (unless some of these consumer chains are removed) | `Scheduled` | `??` | `Scheduled` | `Done` | `Scheduled` | `Scheduled` | --- -| ID | Concern re. *Slashable Consumer Misbehavior* | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit | -| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- | -| 8.01 | Multiple downtime infractions committed by the same validator `val` on the same consumer chain without `val` requesting to `Unjail` itself result in a single `SlashPacket` | `Scheduled` | `??` | `Done` | `Scheduled` | `Scheduled` | -| 8.02 | If evidence of misbehavior is submitted on a consumer chain within the unbonding period targeting an amount `x` of staked tokens, the amount `x` cannot be unlocked on the provider before the corresponding `SlashPacket` is received
- `SlashPacket` will not arrive after the corresponding `MaturedVSCPacket`s | `Scheduled` | `??` | `Done` | `Scheduled` | `Scheduled` |
+| ID | Concern re. *Slashable Consumer Misbehavior* | Code Review | Unit Testing | E2e Testing | Diff. testing | Testnet | Protocol audit |
+| -- | -------------------------------------------- | ----------- | ------------ | ----------- | ------------- | ------- | -------------- |
+| 8.01 | Multiple downtime infractions committed by the same validator `val` on the same consumer chain without `val` requesting to `Unjail` itself result in a single `SlashPacket` | `Scheduled` | `??` | `??` | `Done` | `Scheduled` | `Scheduled` |
+| 8.02 | If evidence of misbehavior is submitted on a consumer chain within the unbonding period targeting an amount `x` of staked tokens, the amount `x` cannot be unlocked on the provider before the corresponding `SlashPacket` is received
- `SlashPacket` will not arrive after the corresponding `MaturedVSCPacket`s | `Scheduled` | `??` | `??` | `Done` | `Scheduled` | `Scheduled` |
---
-| ID | Concern re. *Consumer Rewards Distribution* | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit |
-| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- |
-| 9.01 | Validators on the provider chain receive rewards for participating in IS | `Scheduled` | `Scheduled` | `NA` | `Scheduled` | `NA` |
-| 9.02 | The rewards sent to the provider chain are escrowed on the consumer chains (no double spend) | `Scheduled` | `Scheduled` | `NA` | `Scheduled` | `NA` |
+| ID | Concern re. *Consumer Rewards Distribution* | Code Review | Unit Testing | E2e Testing | Diff. testing | Testnet | Protocol audit |
+| -- | ------------------------------------------- | ----------- | ------------ | ----------- | ------------- | ------- | -------------- |
+| 9.01 | Validators on the provider chain receive rewards for participating in IS | `Scheduled` | `NA` | `Scheduled` | `NA` | `Scheduled` | `NA` |
+| 9.02 | The rewards sent to the provider chain are escrowed on the consumer chains (no double spend) | `Scheduled` | `NA` | `Scheduled` | `NA` | `Scheduled` | `NA` |
---
@@ -146,14 +146,14 @@ The main concern addressed in this section is the correctness of the consumer ch
- governance-enabled consumer chain ([gov-cc](https://github.com/cosmos/interchain-security/issues/141)), with the modified staking and distribution modules (see `x/ccv/staking` and `x/ccv/distribution`); also, must look at the [atom-gov module](https://github.com/cosmos/interchain-security/issues/162)
- CosmWasm-enabled consumer chain ([wasm-cc](https://github.com/cosmos/interchain-security/issues/143)), with the CosmWasm module enabled
-| ID | Concern | Code Review | Unit Testing | Diff. testing | Testnet | Protocol audit |
-| -- | ------- | ----------- | ------------ | ------------- | ------- | ----- |
-| 10.01 | Consumer chain liveness (blocks are being produced) | `Scheduled` | `NA` | `??` | `Scheduled` | `NA` |
-| 10.02 | A chain has the ability to restart as a consumer chain with no more than 24 hours downtime | `Scheduled` | `NA` | `??` | `Scheduled` | `NA` |
-| 10.03 | A consumer chain has the ability to restart as a normal chain after shutting down, either controlled (via `StopConsumerChainProposal`) or due to timing out | `Scheduled` | `??` | `??` | `Scheduled` | `NA` |
-| 10.04 | A consumer chain has the ability to restart as a consumer chain with the same `chainId` after shutting down, either controlled (via `StopConsumerChainProposal`) or due to timing out | `Scheduled` | `??` | `??` | `Scheduled` | `NA` |
-| 10.05 | Governance on `gov-cc` | `Scheduled` | `??` | `??` | `Scheduled` | `NA` |
-| 10.06 | CosmWasm on `wasm-cc` | `Scheduled` | `??` | `??` | `Scheduled` | `NA` |
+| ID | Concern | Code Review | Unit Testing | E2e Testing | Diff.
testing | Testnet | Protocol audit | +| -- | ------- | ----------- | ------------ | ----------- | ------------- | ------- | -------------- | +| 10.01 | Consumer chain liveness (blocks are being produced) | `Scheduled` | `NA` | `NA` | `??` | `Scheduled` | `NA` | +| 10.02 | A chain has the ability to restart as a consumer chain with no more than 24 hours downtime | `Scheduled` | `NA` | `NA` | `??` | `Scheduled` | `NA` | +| 10.03 | A consumer chain has the ability to restart as a normal chain after shutting down, either controlled (via `ConsumerRemovalProposal`) or due to timing out | `Scheduled` | `??` | `??` | `??` | `Scheduled` | `NA` | +| 10.04 | A consumer chain has the ability to restart as a consumer chain with the same `chainId` after shutting down, either controlled (via `ConsumerRemovalProposal`) or due to timing out | `Scheduled` | `??` | `??` | `??` | `Scheduled` | `NA` | +| 10.05 | Governance on `gov-cc` | `Scheduled` | `??` | `??` | `??` | `Scheduled` | `NA` | +| 10.06 | CosmWasm on `wasm-cc` | `Scheduled` | `??` | `??` | `??` | `Scheduled` | `NA` | | TBA ... > TODO create clear concerns for `gov-cc` and `wasm-cc` once the implementations are done diff --git a/go.mod b/go.mod index 15e2b7af1b..20df7e5aab 100644 --- a/go.mod +++ b/go.mod @@ -31,6 +31,7 @@ require ( require ( github.com/golang/mock v1.6.0 + github.com/oxyno-zeta/gomock-extra-matcher v1.1.0 github.com/regen-network/cosmos-proto v0.3.1 ) @@ -127,6 +128,7 @@ require ( gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect nhooyr.io/websocket v1.8.6 // indirect + pgregory.net/rapid v0.5.2 // indirect ) replace ( diff --git a/go.sum b/go.sum index ac7da1f837..8c90862b34 100644 --- a/go.sum +++ b/go.sum @@ -232,8 +232,6 @@ github.com/cosmos/btcutil v1.0.4/go.mod h1:Ffqc8Hn6TJUdDgHBwIZLtrLQC1KdJ9jGJl/Tv github.com/cosmos/cosmos-sdk v0.44.2/go.mod h1:fwQJdw+aECatpTvQTo1tSfHEsxACdZYU80QCZUPnHr4= github.com/cosmos/cosmos-sdk v0.44.3/go.mod h1:bA3+VenaR/l/vDiYzaiwbWvRPWHMBX2jG0ygiFtiBp0= github.com/cosmos/cosmos-sdk v0.45.0/go.mod h1:XXS/asyCqWNWkx2rW6pSuen+EVcpAFxq6khrhnZgHaQ= -github.com/cosmos/cosmos-sdk v0.45.2-0.20220811130336-846d0158765e h1:aKKTrqI9mNCQpLkul4S6BHdWYrdNrFNhiHfy2Oh2yhM= -github.com/cosmos/cosmos-sdk v0.45.2-0.20220811130336-846d0158765e/go.mod h1:XXS/asyCqWNWkx2rW6pSuen+EVcpAFxq6khrhnZgHaQ= github.com/cosmos/cosmos-sdk v0.45.2-0.20220901181011-06d4a64bf808 h1:PW5p0/qt5iJZS7f4bDjo/OYhMhzlmCewh8PZrpBluxo= github.com/cosmos/cosmos-sdk v0.45.2-0.20220901181011-06d4a64bf808/go.mod h1:XXS/asyCqWNWkx2rW6pSuen+EVcpAFxq6khrhnZgHaQ= github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXyBmiFeKpvmq4dzayMdCjCnu8uqmCysIGBT2Y= @@ -865,6 +863,8 @@ github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJ github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= github.com/otiai10/mint v1.3.2/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/oxyno-zeta/gomock-extra-matcher v1.1.0 h1:Yyk5ov0ZPKBXtVEeIWtc4J2XVrHuNoIK+0F2BUJgtsc= +github.com/oxyno-zeta/gomock-extra-matcher v1.1.0/go.mod h1:UMGTHYEmJ1dRq8LDZ7VTAYO4nqM3GD1UGC3RJEUxEz0= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 
h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= @@ -1744,6 +1744,8 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jC mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= nhooyr.io/websocket v1.8.6 h1:s+C3xAMLwGmlI31Nyn/eAehUlZPwfYZu2JXM621Q5/k= nhooyr.io/websocket v1.8.6/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= +pgregory.net/rapid v0.5.2 h1:zC+jmuzcz5yJvG/igG06aLx8kcGmZY435NcuyhblKjY= +pgregory.net/rapid v0.5.2/go.mod h1:PY5XlDGj0+V1FCq0o192FdRhpKHGTRIWBgqjDBTrq04= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/proto/buf.lock b/proto/buf.lock index 755e2cd76b..68c709a8d9 100644 --- a/proto/buf.lock +++ b/proto/buf.lock @@ -4,14 +4,8 @@ deps: - remote: buf.build owner: cosmos repository: gogo-proto - branch: main commit: bee5511075b7499da6178d9e4aaa628b - digest: b1-rrBIustouD-S80cVoZ_rM0qJsmei9AgbXy9GPQu6vxg= - create_time: 2021-12-02T20:01:17.069307Z - remote: buf.build owner: googleapis repository: googleapis - branch: main - commit: 40f07f5b563941f2b20b991a7aedd53d - digest: b1-Iv8fTR4AKXwNW80Ey6K5tY8cP053y_95sB5fro9IWZo= - create_time: 2021-12-02T15:07:41.896892Z + commit: 62f35d8aed1149c291d606d958a7ce32 diff --git a/proto/interchain_security/ccv/consumer/v1/consumer.proto b/proto/interchain_security/ccv/consumer/v1/consumer.proto index 8f095cfcfc..57d0f0fbd3 100644 --- a/proto/interchain_security/ccv/consumer/v1/consumer.proto +++ b/proto/interchain_security/ccv/consumer/v1/consumer.proto @@ -10,9 +10,10 @@ import "cosmos/staking/v1beta1/staking.proto"; import "gogoproto/gogo.proto"; import "cosmos_proto/cosmos.proto"; - // Params defines the parameters for CCV consumer module message Params { + // TODO: Remove enabled flag and find a better way to setup e2e tests + // See: https://github.com/cosmos/interchain-security/issues/339 bool enabled = 1; /////////////////////// @@ -27,22 +28,22 @@ message Params { // transfers over. These parameters is auto-set during the consumer <-> // provider handshake procedure. string distribution_transmission_channel = 3; - string provider_fee_pool_addr_str = 4; + string provider_fee_pool_addr_str = 4; } // LastTransmissionBlockHeight is the last time validator holding // pools were transmitted to the provider chain -message LastTransmissionBlockHeight { - int64 height = 1; -} +message LastTransmissionBlockHeight { int64 height = 1; } // CrossChainValidator defines the validators for CCV consumer module message CrossChainValidator { bytes address = 1; - int64 power = 2; + int64 power = 2; // pubkey is the consensus public key of the validator, as a Protobuf Any. 
- google.protobuf.Any pubkey = 3 - [(cosmos_proto.accepts_interface) = "cosmos.crypto.PubKey", (gogoproto.moretags) = "yaml:\"consensus_pubkey\""]; + google.protobuf.Any pubkey = 3 [ + (cosmos_proto.accepts_interface) = "cosmos.crypto.PubKey", + (gogoproto.moretags) = "yaml:\"consensus_pubkey\"" + ]; } // SlashRequest defines a slashing request for CCV consumer module @@ -50,3 +51,9 @@ message SlashRequest { interchain_security.ccv.v1.SlashPacketData packet = 1; cosmos.staking.v1beta1.InfractionType infraction = 2; } + +// SlashRequests is a list of slash requests for CCV consumer module +message SlashRequests { + repeated SlashRequest requests = 1 + [ (gogoproto.nullable) = false ]; +} diff --git a/proto/interchain_security/ccv/consumer/v1/genesis.proto b/proto/interchain_security/ccv/consumer/v1/genesis.proto index 171c145f71..c71aecd64a 100644 --- a/proto/interchain_security/ccv/consumer/v1/genesis.proto +++ b/proto/interchain_security/ccv/consumer/v1/genesis.proto @@ -12,20 +12,45 @@ import "tendermint/abci/types.proto"; // GenesisState defines the CCV consumer chain genesis state message GenesisState { - Params params = 1 [(gogoproto.nullable) = false]; - string provider_client_id = 2; // empty for a completely new chain - string provider_channel_id = 3; // empty for a completely new chain - bool new_chain = 4; // true for new chain GenesisState, false for chain restart. + Params params = 1 [ (gogoproto.nullable) = false ]; + string provider_client_id = 2; // empty for a completely new chain + string provider_channel_id = 3; // empty for a completely new chain + bool new_chain = 4; // true for new chain GenesisState, false for chain restart. // ProviderClientState filled in on new chain, nil on restart. ibc.lightclients.tendermint.v1.ClientState provider_client_state = 5; // ProviderConsensusState filled in on new chain, nil on restart. ibc.lightclients.tendermint.v1.ConsensusState provider_consensus_state = 6; - repeated MaturingVSCPacket maturing_packets = 7 [(gogoproto.nullable) = false]; - repeated .tendermint.abci.ValidatorUpdate initial_val_set = 8 [(gogoproto.nullable) = false]; + // MaturingPackets nil on new chain, filled on restart. + repeated MaturingVSCPacket maturing_packets = 7 + [ (gogoproto.nullable) = false ]; + // InitialValset filled in on new chain and on restart. + repeated .tendermint.abci.ValidatorUpdate initial_val_set = 8 + [ (gogoproto.nullable) = false ]; + // HeightToValsetUpdateId nil on new chain, filled on restart. + repeated HeightToValsetUpdateID height_to_valset_update_id = 9 + [ (gogoproto.nullable) = false ]; + // OutstandingDowntimes nil on new chain, filled on restart. + repeated OutstandingDowntime outstanding_downtime_slashing = 10 + [ (gogoproto.nullable) = false ]; + // PendingSlashRequests filled in on new chain, nil on restart. + interchain_security.ccv.consumer.v1.SlashRequests pending_slash_requests = 11 + [ (gogoproto.nullable) = false ]; } -// UnbondingSequence defines the genesis information for each unbonding packet sequence. 
+// MaturingVSCPacket defines the genesis information for the +// unbonding VSC packet message MaturingVSCPacket { - uint64 vscId = 1; - uint64 maturity_time = 2; + uint64 vscId = 1; + uint64 maturity_time = 2; } + +// HeightValsetUpdateID defines the genesis information for the mapping +// of each block height to a valset update id +message HeightToValsetUpdateID { + uint64 height = 1; + uint64 valset_update_id = 2; +} + +// OutstandingDowntime defines the genesis information for each validator +// flagged with an outstanding downtime slashing. +message OutstandingDowntime { string validator_consensus_address = 1; } \ No newline at end of file diff --git a/proto/interchain_security/ccv/provider/v1/genesis.proto b/proto/interchain_security/ccv/provider/v1/genesis.proto index e26c5d4a48..dedfc72120 100644 --- a/proto/interchain_security/ccv/provider/v1/genesis.proto +++ b/proto/interchain_security/ccv/provider/v1/genesis.proto @@ -7,15 +7,72 @@ option go_package = "github.com/cosmos/interchain-security/x/ccv/provider/types" import "gogoproto/gogo.proto"; import "interchain_security/ccv/v1/ccv.proto"; import "interchain_security/ccv/provider/v1/provider.proto"; +import "interchain_security/ccv/consumer/v1/consumer.proto"; +import "interchain_security/ccv/consumer/v1/genesis.proto"; + // GenesisState defines the CCV provider chain genesis state message GenesisState { - repeated ConsumerState consumer_states = 1 [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"consumer_states\""]; - Params params = 2 [(gogoproto.nullable) = false]; + // empty for a new chain + uint64 valset_update_id = 1; + // empty for a new chain + repeated ConsumerState consumer_states = 2 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"consumer_states\"" + ]; + // empty for a new chain + repeated interchain_security.ccv.v1.UnbondingOp unbonding_ops = 3 + [ (gogoproto.nullable) = false ]; + // empty for a new chain + interchain_security.ccv.v1.MaturedUnbondingOps mature_unbonding_ops = 4; + // empty for a new chain + repeated ValsetUpdateIdToHeight valset_update_id_to_height = 5 + [ (gogoproto.nullable) = false ]; + // empty for a new chain + repeated ConsumerAdditionProposal consumer_addition_proposals = 6 + [ (gogoproto.nullable) = false ]; + // empty for a new chain + repeated ConsumerRemovalProposal consumer_removal_proposals = 7 + [ (gogoproto.nullable) = false ]; + Params params = 8 + [ (gogoproto.nullable) = false ]; } -// ConsumerState defines the state that the provider chain stores for each consumer chain +// consumer chain message ConsumerState { - string chain_id = 1; - string channel_id = 2; + // ChannelID defines the chain ID for the consumer chain + string chain_id = 1; + // ChannelID defines the IBC channel ID for the consumer chain + string channel_id = 2; + // ClientID defines the IBC client ID for the consumer chain + string client_id = 3; + // InitalHeight defines the initial block height for the consumer chain + uint64 initial_height = 4; + // LockUnbondingOnTimeout defines whether the unbonding funds should be released for this + // chain in case of a IBC channel timeout + bool lock_unbonding_on_timeout = 5; + // ConsumerGenesis defines the initial consumer chain genesis states + interchain_security.ccv.consumer.v1.GenesisState consumer_genesis = 6 + [ (gogoproto.nullable) = false ]; + // PendingValsetChanges defines the pending validator set changes for the consumer chain + repeated interchain_security.ccv.v1.ValidatorSetChangePacketData pending_valset_changes = 7 + [ 
(gogoproto.nullable) = false ]; + repeated string slash_downtime_ack = 8; + // UnbondingOpsIndex defines the unbonding operations on the consumer chain + repeated UnbondingOpIndex unbonding_ops_index = 9 + [ (gogoproto.nullable) = false ]; +} + +// UnbondingOpIndex defines the genesis information for each unbonding operations index +// referenced by chain id and valset udpate id +message UnbondingOpIndex { + uint64 valset_update_id = 1; + repeated uint64 unbonding_op_index = 2; +} + +// ValsetUpdateIdToHeight defines the genesis information for the mapping +// of each valset udpate id to a block height +message ValsetUpdateIdToHeight { + uint64 valset_update_id = 1; + uint64 height = 2; } diff --git a/proto/interchain_security/ccv/provider/v1/provider.proto b/proto/interchain_security/ccv/provider/v1/provider.proto index 0d21a6a10b..0c0beb2bb3 100644 --- a/proto/interchain_security/ccv/provider/v1/provider.proto +++ b/proto/interchain_security/ccv/provider/v1/provider.proto @@ -9,10 +9,10 @@ import "google/protobuf/timestamp.proto"; import "ibc/core/client/v1/client.proto"; import "ibc/lightclients/tendermint/v1/tendermint.proto"; -// CreateConsumerChainProposal is a governance proposal on the provider chain to spawn a new consumer chain. +// ConsumerAdditionProposal is a governance proposal on the provider chain to spawn a new consumer chain. // If it passes, then all validators on the provider chain are expected to validate the consumer chain at spawn time // or get slashed. It is recommended that spawn time occurs after the proposal end time. -message CreateConsumerChainProposal { +message ConsumerAdditionProposal { option (gogoproto.goproto_getters) = false; option (gogoproto.goproto_stringer) = false; @@ -40,10 +40,10 @@ message CreateConsumerChainProposal { bool lock_unbonding_on_timeout = 8; } -// StopConsumerProposal is a governance proposal on the provider chain to stop a consumer chain. +// ConsumerRemovalProposal is a governance proposal on the provider chain to remove (and stop) a consumer chain. // If it passes, all the consumer chain's state is removed from the provider chain. The outstanding unbonding // operation funds are released if the LockUnbondingOnTimeout parameter is set to false for the consumer chain ID. 
- message StopConsumerChainProposal { + message ConsumerRemovalProposal { // the title of the proposal string title = 1; // the description of the proposal @@ -62,5 +62,11 @@ message Params { message HandshakeMetadata { string provider_fee_pool_addr = 1; - string version = 2; + string version = 2; } + +// SlashAcks contains addesses of consumer chain validators +// successfully slashed on the provider chain +message SlashAcks { + repeated string addresses = 1; +} \ No newline at end of file diff --git a/proto/interchain_security/ccv/provider/v1/query.proto b/proto/interchain_security/ccv/provider/v1/query.proto index 559049c5ce..70f36e2967 100644 --- a/proto/interchain_security/ccv/provider/v1/query.proto +++ b/proto/interchain_security/ccv/provider/v1/query.proto @@ -8,17 +8,18 @@ import "gogoproto/gogo.proto"; import "interchain_security/ccv/consumer/v1/genesis.proto"; service Query { - // ConsumerGenesis queries the genesis state needed to start a consumer chain whose proposal - // has been accepted - rpc ConsumerGenesis(QueryConsumerGenesisRequest) returns (QueryConsumerGenesisResponse) { - option (google.api.http).get = "/interchain_security/ccv/provider/consumer_genesis/{chain_id}"; + // ConsumerGenesis queries the genesis state needed to start a consumer chain + // whose proposal has been accepted + rpc ConsumerGenesis(QueryConsumerGenesisRequest) + returns (QueryConsumerGenesisResponse) { + option (google.api.http).get = + "/interchain_security/ccv/provider/consumer_genesis/{chain_id}"; } } -message QueryConsumerGenesisRequest { - string chain_id = 1; -} +message QueryConsumerGenesisRequest { string chain_id = 1; } message QueryConsumerGenesisResponse { - interchain_security.ccv.consumer.v1.GenesisState genesis_state = 1 [(gogoproto.nullable) = false]; + interchain_security.ccv.consumer.v1.GenesisState genesis_state = 1 + [ (gogoproto.nullable) = false ]; } diff --git a/proto/interchain_security/ccv/v1/ccv.proto b/proto/interchain_security/ccv/v1/ccv.proto index fd7c64f96d..23729db928 100644 --- a/proto/interchain_security/ccv/v1/ccv.proto +++ b/proto/interchain_security/ccv/v1/ccv.proto @@ -9,13 +9,16 @@ import "cosmos/staking/v1beta1/staking.proto"; import "gogoproto/gogo.proto"; import "tendermint/abci/types.proto"; -// This packet is sent from provider chain to consumer chain if the validator set for consumer chain -// changes (due to new bonding/unbonding messages or slashing events) -// A VSCMatured packet from consumer chain will be sent asynchronously once unbonding period is over, -// and this will function as `UnbondingOver` message for this packet. +// This packet is sent from provider chain to consumer chain if the validator +// set for consumer chain changes (due to new bonding/unbonding messages or +// slashing events) A VSCMatured packet from consumer chain will be sent +// asynchronously once unbonding period is over, and this will function as +// `UnbondingOver` message for this packet. 
message ValidatorSetChangePacketData { - repeated .tendermint.abci.ValidatorUpdate validator_updates = 1 - [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"validator_updates\""]; + repeated .tendermint.abci.ValidatorUpdate validator_updates = 1 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"validator_updates\"" + ]; uint64 valset_update_id = 2; // consensus address of consumer chain validators // successfully slashed on the provider chain @@ -31,18 +34,30 @@ message UnbondingOp { // This packet is sent from the consumer chain to the provider chain // to notify that a VSC packet reached maturity on the consumer chain. message VSCMaturedPacketData { - // the id of the VSC packet that reached maturity + // the id of the VSC packet that reached maturity uint64 valset_update_id = 1; } -// This packet is sent from the consumer chain to the provider chain -// to request the slashing of a validator as a result of an infraction -// committed on the consumer chain. +// This packet is sent from the consumer chain to the provider chain +// to request the slashing of a validator as a result of an infraction +// committed on the consumer chain. message SlashPacketData { - tendermint.abci.Validator validator = 1 - [(gogoproto.nullable) = false, (gogoproto.moretags) = "yaml:\"validator\""]; + tendermint.abci.Validator validator = 1 [ + (gogoproto.nullable) = false, + (gogoproto.moretags) = "yaml:\"validator\"" + ]; // map to the infraction block height on the provider uint64 valset_update_id = 2; // tell if the slashing is for a downtime or a double-signing infraction cosmos.staking.v1beta1.InfractionType infraction = 3; } + +// UnbondingOpsIndex defines a list of unbonding operation ids. +message UnbondingOpsIndex { + repeated uint64 ids = 1; +} + +// MaturedUnbondingOps defines a list of ids corresponding to ids of matured unbonding operations. +message MaturedUnbondingOps { + repeated uint64 ids = 1; +} diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 7840db031a..1f77331450 100644 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -19,4 +19,4 @@ cd .. 
cp -r github.com/cosmos/interchain-security/* ./ rm -rf github.com -go mod tidy -compat=1.17 +go mod tidy -compat=1.18 diff --git a/tests/difference/core/driver/setup.go b/tests/difference/core/driver/setup.go index 0e14d1adea..3228c3e802 100644 --- a/tests/difference/core/driver/setup.go +++ b/tests/difference/core/driver/setup.go @@ -477,7 +477,7 @@ func (b *Builder) createConsumerGenesis(tmConfig *ibctesting.TendermintConfig) * "", // ignore distribution "", // ignore distribution ) - return consumertypes.NewInitialGenesisState(providerClient, providerConsState, valUpdates, params) + return consumertypes.NewInitialGenesisState(providerClient, providerConsState, valUpdates, consumertypes.SlashRequests{}, params) } func (b *Builder) createLink() { diff --git a/tests/e2e/channel_init_test.go b/tests/e2e/channel_init_test.go index 46f62b0e49..c319fda16f 100644 --- a/tests/e2e/channel_init_test.go +++ b/tests/e2e/channel_init_test.go @@ -6,42 +6,19 @@ import ( app "github.com/cosmos/interchain-security/app/consumer" - "fmt" - - ibctypes "github.com/cosmos/ibc-go/v3/modules/core/03-connection/types" - clienttmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" - - consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" - providerkeeper "github.com/cosmos/interchain-security/x/ccv/provider/keeper" - providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" tmtypes "github.com/tendermint/tendermint/types" - "github.com/cosmos/interchain-security/x/ccv/utils" - channeltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types" - host "github.com/cosmos/ibc-go/v3/modules/core/24-host" - - "encoding/json" - "time" appConsumer "github.com/cosmos/interchain-security/app/consumer" - "github.com/cosmos/interchain-security/x/ccv/consumer" ccv "github.com/cosmos/interchain-security/x/ccv/types" abci "github.com/tendermint/tendermint/abci/types" - crypto "github.com/tendermint/tendermint/proto/tendermint/crypto" - sdk "github.com/cosmos/cosmos-sdk/types" - distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" - govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" - ibctesting "github.com/cosmos/ibc-go/v3/testing" - appProvider "github.com/cosmos/interchain-security/app/provider" - "github.com/cosmos/interchain-security/x/ccv/provider" - "github.com/cosmos/interchain-security/x/ccv/provider/types" ) -func (suite *ConsumerKeeperTestSuite) TestConsumerGenesis() { +func (suite *CCVTestSuite) TestConsumerGenesis() { genesis := suite.consumerChain.App.(*app.App).ConsumerKeeper.ExportGenesis(suite.consumerChain.GetContext()) suite.Require().Equal(suite.providerClient, genesis.ProviderClientState) @@ -90,19 +67,16 @@ func (suite *ConsumerKeeperTestSuite) TestConsumerGenesis() { ccv.ConsumerPortID, suite.path.EndpointA.ChannelID, clienttypes.NewHeight(1, 0), 0) suite.consumerChain.App.(*app.App).ConsumerKeeper.OnRecvVSCPacket(suite.consumerChain.GetContext(), packet, pd) - - // mocking the fact that consumer chain validators should be provider chain validators - // TODO: Fix testing suite so we can initialize both chains with the same validator set valUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.providerChain.Vals) restartGenesis := suite.consumerChain.App.(*app.App).ConsumerKeeper.ExportGenesis(suite.consumerChain.GetContext()) - restartGenesis.InitialValSet = valUpdates + suite.Require().Equal(valUpdates, restartGenesis.InitialValSet) // ensure reset 
genesis is set correctly providerChannel := suite.path.EndpointA.ChannelID suite.Require().Equal(providerChannel, restartGenesis.ProviderChannelId) maturityTime := suite.consumerChain.App.(*app.App).ConsumerKeeper.GetPacketMaturityTime(suite.consumerChain.GetContext(), 1) - unbondingPeriod, found := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetUnbondingTime(suite.ctx) + unbondingPeriod, found := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetUnbondingTime(suite.consumerCtx()) suite.Require().True(found) suite.Require().Equal(uint64(origTime.Add(unbondingPeriod).UnixNano()), maturityTime, "maturity time is not set correctly in genesis") @@ -110,747 +84,12 @@ func (suite *ConsumerKeeperTestSuite) TestConsumerGenesis() { suite.consumerChain.App.(*app.App).ConsumerKeeper.InitGenesis(suite.consumerChain.GetContext(), restartGenesis) }) } -func (suite *ConsumerTestSuite) TestOnChanOpenInit() { - var ( - channel *channeltypes.Channel - ) - - testCases := []struct { - name string - malleate func() - expPass bool - }{ - - { - "success", func() {}, true, - }, - { - "invalid: provider channel already established", func() { - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.SetProviderChannel(suite.ctx, "channel-2") - }, false, - }, - { - "invalid: UNORDERED channel", func() { - channel.Ordering = channeltypes.UNORDERED - }, false, - }, - { - "invalid port ID", func() { - suite.path.EndpointA.ChannelConfig.PortID = ibctesting.MockPort - }, false, - }, - { - "invalid version", func() { - channel.Version = "version" - }, false, - }, - { - "invalid counter party port ID", func() { - channel.Counterparty.PortId = ibctesting.MockPort - }, false, - }, - { - "invalid: verify provider chain failed", func() { - // setup a new path with provider client on consumer chain being different from genesis client - path := ibctesting.NewPath(suite.consumerChain, suite.providerChain) - // - channel config - path.EndpointA.ChannelConfig.PortID = ccv.ConsumerPortID - path.EndpointB.ChannelConfig.PortID = ccv.ProviderPortID - path.EndpointA.ChannelConfig.Version = ccv.Version - path.EndpointB.ChannelConfig.Version = ccv.Version - path.EndpointA.ChannelConfig.Order = channeltypes.ORDERED - path.EndpointB.ChannelConfig.Order = channeltypes.ORDERED - - // create consumer client on provider chain, and provider client on consumer chain - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - err := suite.createCustomClient(path.EndpointB, consumerUnbondingPeriod) - suite.Require().NoError(err) - err = suite.createCustomClient(path.EndpointA, providerUnbondingPeriod) - suite.Require().NoError(err) - - suite.coordinator.CreateConnections(path) - suite.path = path - channel.ConnectionHops = []string{suite.path.EndpointA.ConnectionID} - }, false, - }, - } - - for _, tc := range testCases { - tc := tc - - suite.Run(tc.name, func() { - suite.SetupTest() // reset - - suite.path.EndpointA.ChannelID = ibctesting.FirstChannelID - - counterparty := channeltypes.NewCounterparty(suite.path.EndpointB.ChannelConfig.PortID, "") - channel = &channeltypes.Channel{ - State: channeltypes.INIT, - Ordering: channeltypes.ORDERED, - Counterparty: counterparty, - ConnectionHops: []string{suite.path.EndpointA.ConnectionID}, - Version: ccv.Version, - } - - consumerModule := 
consumer.NewAppModule(suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper) - chanCap, err := suite.consumerChain.App.GetScopedIBCKeeper().NewCapability( - suite.ctx, - host.ChannelCapabilityPath( - ccv.ConsumerPortID, - suite.path.EndpointA.ChannelID, - ), - ) - suite.Require().NoError(err) - - tc.malleate() // explicitly change fields in channel and testChannel - - err = consumerModule.OnChanOpenInit( - suite.ctx, - channel.Ordering, - channel.GetConnectionHops(), - suite.path.EndpointA.ChannelConfig.PortID, - suite.path.EndpointA.ChannelID, - chanCap, - channel.Counterparty, - channel.GetVersion(), - ) - - if tc.expPass { - suite.Require().NoError(err) - } else { - suite.Require().Error(err) - } - - }) - } -} - -func (suite *ConsumerTestSuite) TestOnChanOpenTry() { - // OnOpenTry must error even with correct arguments - consumerModule := consumer.NewAppModule(suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper) - _, err := consumerModule.OnChanOpenTry( - suite.ctx, - channeltypes.ORDERED, - []string{"connection-1"}, - ccv.ConsumerPortID, - "channel-1", - nil, - channeltypes.NewCounterparty(ccv.ProviderPortID, "channel-1"), - ccv.Version, - ) - suite.Require().Error(err, "OnChanOpenTry callback must error on consumer chain") -} - -// TestOnChanOpenAck tests the consumer module's OnChanOpenAck implementation against the spec: -// https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-ccf-coack1 -func (suite *ConsumerTestSuite) TestOnChanOpenAck() { - - var ( - portID string - channelID string - metadataBz []byte - metadata providertypes.HandshakeMetadata - err error - ) - testCases := []struct { - name string - malleate func() - expPass bool - }{ - { - "success", func() {}, true, - }, - { - "invalid: provider channel already established", - func() { - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.SetProviderChannel(suite.ctx, "channel-2") - }, false, - }, - { - "invalid: cannot unmarshal ack metadata ", - func() { - metadataBz = []byte{78, 89, 20} - }, false, - }, - { - "invalid: mismatched versions", - func() { - // Set counter party version to an invalid value, passed as marshaled metadata - metadata.Version = "invalidVersion" - metadataBz, err = (&metadata).Marshal() - suite.Require().NoError(err) - }, false, - }, - // See ConsumerKeeper.GetConnectionHops as to why portID and channelID must be correct - { - "invalid: portID ", - func() { - portID = "invalidPort" - }, false, - }, - { - "invalid: channelID ", - func() { - channelID = "invalidChan" - }, false, - }, - } - - for _, tc := range testCases { - tc := tc - suite.Run(fmt.Sprintf("Case: %s", tc.name), func() { - suite.SetupTest() // reset - portID = ccv.ConsumerPortID - channelID = "channel-1" - counterChannelID := "channel-2" // per spec this is not required by onChanOpenAck() - suite.path.EndpointA.ChannelID = channelID - - // Set INIT channel on consumer chain - suite.consumerChain.App.GetIBCKeeper().ChannelKeeper.SetChannel( - suite.ctx, - ccv.ConsumerPortID, - channelID, - channeltypes.NewChannel( - channeltypes.INIT, - channeltypes.ORDERED, - channeltypes.NewCounterparty(ccv.ProviderPortID, ""), - []string{suite.path.EndpointA.ConnectionID}, - suite.path.EndpointA.ChannelConfig.Version, - ), - ) - - consumerModule := consumer.NewAppModule( - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper) - - metadata := providertypes.HandshakeMetadata{ - ProviderFeePoolAddr: "", // dummy address used - Version: suite.path.EndpointB.ChannelConfig.Version, - } - - 
metadataBz, err = (&metadata).Marshal() - suite.Require().NoError(err) - - tc.malleate() // Explicitly change fields already defined - - err = consumerModule.OnChanOpenAck( - suite.ctx, - portID, - channelID, - counterChannelID, - string(metadataBz), - ) - - if tc.expPass { - suite.Require().NoError(err) - } else { - suite.Require().Error(err) - } - }) - } -} - -func (suite *ConsumerTestSuite) TestOnChanOpenConfirm() { - consumerModule := consumer.NewAppModule(suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper) - err := consumerModule.OnChanOpenConfirm(suite.ctx, ccv.ConsumerPortID, "channel-1") - suite.Require().Error(err, "OnChanOpenConfirm callback must error on consumer chain") -} - -func (suite *ConsumerTestSuite) TestOnChanCloseInit() { - channelID := "channel-1" - testCases := []struct { - name string - setup func(suite *ConsumerTestSuite) - expError bool - }{ - { - name: "can close duplicate in-progress channel once provider channel is established", - setup: func(suite *ConsumerTestSuite) { - // Set INIT channel on consumer chain - suite.consumerChain.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.ctx, ccv.ConsumerPortID, channelID, - channeltypes.NewChannel( - channeltypes.INIT, channeltypes.ORDERED, channeltypes.NewCounterparty(ccv.ProviderPortID, ""), - []string{suite.path.EndpointA.ConnectionID}, suite.path.EndpointA.ChannelConfig.Version), - ) - suite.path.EndpointA.ChannelID = channelID - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.SetProviderChannel(suite.ctx, "different-channel") - }, - expError: false, - }, - { - name: "can close duplicate open channel once provider channel is established", - setup: func(suite *ConsumerTestSuite) { - // create open channel - suite.coordinator.CreateChannels(suite.path) - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.SetProviderChannel(suite.ctx, "different-channel") - }, - expError: false, - }, - { - name: "cannot close in-progress channel, no established channel yet", - setup: func(suite *ConsumerTestSuite) { - // Set INIT channel on consumer chain - suite.consumerChain.App.GetIBCKeeper().ChannelKeeper.SetChannel(suite.ctx, ccv.ConsumerPortID, channelID, - channeltypes.NewChannel( - channeltypes.INIT, channeltypes.ORDERED, channeltypes.NewCounterparty(ccv.ProviderPortID, ""), - []string{suite.path.EndpointA.ConnectionID}, suite.path.EndpointA.ChannelConfig.Version), - ) - suite.path.EndpointA.ChannelID = channelID - }, - expError: true, - }, - { - name: "cannot close provider channel", - setup: func(suite *ConsumerTestSuite) { - // create open channel - suite.coordinator.CreateChannels(suite.path) - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.SetProviderChannel(suite.ctx, suite.path.EndpointA.ChannelID) - }, - expError: true, - }, - } - - for _, tc := range testCases { - tc := tc - suite.Run(fmt.Sprintf("Case: %s", tc.name), func() { - suite.SetupTest() // reset suite - tc.setup(suite) - - consumerModule := consumer.NewAppModule(suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper) - - err := consumerModule.OnChanCloseInit(suite.ctx, ccv.ConsumerPortID, suite.path.EndpointA.ChannelID) - - if tc.expError { - suite.Require().Error(err) - } else { - suite.Require().NoError(err) - } - }) - } -} // TestProviderClientMatches tests that the provider client managed by the consumer keeper matches the client keeper's client state -func (suite *ConsumerKeeperTestSuite) TestProviderClientMatches() { - providerClientID, ok := 
suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderClientID(suite.ctx) +func (suite *CCVTestSuite) TestProviderClientMatches() { + providerClientID, ok := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderClientID(suite.consumerCtx()) suite.Require().True(ok) - clientState, _ := suite.consumerChain.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.ctx, providerClientID) + clientState, _ := suite.consumerChain.App.GetIBCKeeper().ClientKeeper.GetClientState(suite.consumerCtx(), providerClientID) suite.Require().Equal(suite.providerClient, clientState, "stored client state does not match genesis provider client") } - -// TestVerifyProviderChain tests the VerifyProviderChain method for the consumer keeper -func (suite *ConsumerKeeperTestSuite) TestVerifyProviderChain() { - var connectionHops []string - channelID := "channel-0" - testCases := []struct { - name string - setup func(suite *ConsumerKeeperTestSuite) - connectionHops []string - expError bool - }{ - { - name: "success", - setup: func(suite *ConsumerKeeperTestSuite) { - // create consumer client on provider chain - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - suite.CreateCustomClient(suite.path.EndpointB, consumerUnbondingPeriod) - err := suite.path.EndpointB.CreateClient() - suite.Require().NoError(err) - - suite.coordinator.CreateConnections(suite.path) - - // set connection hops to be connection hop from path endpoint - connectionHops = []string{suite.path.EndpointA.ConnectionID} - }, - connectionHops: []string{suite.path.EndpointA.ConnectionID}, - expError: false, - }, - { - name: "connection hops is not length 1", - setup: func(suite *ConsumerKeeperTestSuite) { - // create consumer client on provider chain - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - suite.CreateCustomClient(suite.path.EndpointB, consumerUnbondingPeriod) - - suite.coordinator.CreateConnections(suite.path) - - // set connection hops to be connection hop from path endpoint - connectionHops = []string{suite.path.EndpointA.ConnectionID, "connection-2"} - }, - expError: true, - }, - { - name: "connection does not exist", - setup: func(suite *ConsumerKeeperTestSuite) { - // set connection hops to be connection hop from path endpoint - connectionHops = []string{"connection-dne"} - }, - expError: true, - }, - { - name: "clientID does not match", - setup: func(suite *ConsumerKeeperTestSuite) { - // create consumer client on provider chain - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - suite.CreateCustomClient(suite.path.EndpointB, consumerUnbondingPeriod) - - // create a new provider client on consumer chain that is different from the one in genesis - suite.CreateCustomClient(suite.path.EndpointA, providerUnbondingPeriod) - - suite.coordinator.CreateConnections(suite.path) - - // set connection hops to be connection hop from path endpoint - connectionHops = []string{suite.path.EndpointA.ConnectionID} - }, - expError: true, - }, - } - - for _, tc := range testCases { - tc := tc - 
suite.Run(fmt.Sprintf("Case: %s", tc.name), func() { - suite.SetupTest() // reset suite - - tc.setup(suite) - - // Verify ProviderChain on consumer chain using path returned by setup - err := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.VerifyProviderChain(suite.ctx, channelID, connectionHops) - - if tc.expError { - suite.Require().Error(err, "invalid case did not return error") - } else { - suite.Require().NoError(err, "valid case returned error") - } - }) - } -} - -// TestOnChanOpenTry validates the provider's OnChanOpenTry implementation against the spec: -// https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-cotry1 -func (suite *ProviderTestSuite) TestOnChanOpenTry() { - var ( - channel *channeltypes.Channel - counterpartyVersion string - providerKeeper *providerkeeper.Keeper - ) - - testCases := []struct { - name string - malleate func() - expPass bool - }{ - { - "success", func() {}, true, - }, - { - "invalid order", func() { - channel.Ordering = channeltypes.UNORDERED - }, false, - }, - { - "invalid port ID", func() { - suite.path.EndpointA.ChannelConfig.PortID = ibctesting.MockPort - }, false, - }, - { - "invalid counter party port ID", func() { - channel.Counterparty.PortId = ibctesting.MockPort - }, false, - }, - { - "invalid counter party version", func() { - counterpartyVersion = "invalidVersion" - }, false, - }, - { - "unexpected client ID mapped to chain ID", func() { - providerKeeper.SetConsumerClientId( - suite.providerCtx(), - suite.path.EndpointA.Chain.ChainID, - "invalidClientID", - ) - }, false, - }, - { - "other CCV channel exists for this consumer chain", func() { - providerKeeper.SetChainToChannel( - suite.providerCtx(), - suite.path.EndpointA.Chain.ChainID, - "some existing channel ID", - ) - }, false, - }, - } - - for _, tc := range testCases { - tc := tc - - suite.Run(tc.name, func() { - suite.SetupTest() // reset - - suite.path.EndpointA.ChannelConfig.PortID = ccv.ProviderPortID - suite.path.EndpointA.ChannelID = "providerChanID" - suite.path.EndpointB.ChannelConfig.PortID = ccv.ConsumerPortID - suite.path.EndpointB.ChannelID = "consumerChanID" - suite.path.EndpointA.ConnectionID = "ConnID" - suite.path.EndpointA.ClientID = "ClientID" - suite.path.EndpointA.Chain.ChainID = "ChainID" - - counterparty := channeltypes.NewCounterparty( - suite.path.EndpointB.ChannelConfig.PortID, - suite.path.EndpointA.ChannelID, - ) - counterpartyVersion = ccv.Version - - channel = &channeltypes.Channel{ - State: channeltypes.INIT, - Ordering: channeltypes.ORDERED, - Counterparty: counterparty, - ConnectionHops: []string{suite.path.EndpointA.ConnectionID}, - Version: counterpartyVersion, - } - - providerKeeper = &suite.providerChain.App.(*appProvider.App).ProviderKeeper - providerModule := provider.NewAppModule(providerKeeper) - chanCap, err := suite.providerChain.App.GetScopedIBCKeeper().NewCapability( - suite.providerCtx(), - host.ChannelCapabilityPath( - suite.path.EndpointA.ChannelConfig.PortID, - suite.path.EndpointA.ChannelID, - ), - ) - suite.Require().NoError(err) - - // Manual keeper setup - connKeeper := suite.providerChain.App.GetIBCKeeper().ConnectionKeeper - connKeeper.SetConnection( - suite.providerCtx(), - suite.path.EndpointA.ConnectionID, - ibctypes.ConnectionEnd{ - ClientId: suite.path.EndpointA.ClientID, - }, - ) - clientKeeper := suite.providerChain.App.GetIBCKeeper().ClientKeeper - clientKeeper.SetClientState( - suite.providerCtx(), - suite.path.EndpointA.ClientID, - &clienttmtypes.ClientState{ - 
ChainId: suite.path.EndpointA.Chain.ChainID, - }, - ) - providerKeeper.SetConsumerClientId( - suite.providerCtx(), - suite.path.EndpointA.Chain.ChainID, - suite.path.EndpointA.ClientID, - ) - - tc.malleate() // explicitly change fields - - metadata, err := providerModule.OnChanOpenTry( - suite.providerCtx(), - channel.Ordering, - channel.GetConnectionHops(), - suite.path.EndpointA.ChannelConfig.PortID, - suite.path.EndpointA.ChannelID, - chanCap, - channel.Counterparty, - counterpartyVersion, - ) - - if tc.expPass { - suite.Require().NoError(err) - md := &providertypes.HandshakeMetadata{} - err = md.Unmarshal([]byte(metadata)) - suite.Require().NoError(err) - } else { - suite.Require().Error(err) - } - }) - } -} - -func (suite *ProviderTestSuite) TestOnChanOpenInit() { - // OnChanOpenInit must error for provider even with correct arguments - providerModule := provider.NewAppModule(&suite.providerChain.App.(*appProvider.App).ProviderKeeper) - - err := providerModule.OnChanOpenInit( - suite.providerCtx(), - channeltypes.ORDERED, - []string{"connection-1"}, - ccv.ProviderPortID, - "channel-1", - nil, - channeltypes.NewCounterparty(ccv.ConsumerPortID, "channel-1"), - ccv.Version, - ) - suite.Require().Error(err, "OnChanOpenInit must error on provider chain") -} - -// TestConsumerChainProposalHandler tests the handler for consumer chain proposals -// for both CreateConsumerChainProposal and StopConsumerChainProposal -// -// TODO: Determine if it's possible to make this a unit test -func (suite *ProviderTestSuite) TestConsumerChainProposalHandler() { - var ( - ctx sdk.Context - content govtypes.Content - err error - ) - - testCases := []struct { - name string - malleate func(*ProviderTestSuite) - expPass bool - }{ - { - "valid create consumerchain proposal", func(suite *ProviderTestSuite) { - initialHeight := clienttypes.NewHeight(2, 3) - // ctx blocktime is after proposal's spawn time - ctx = suite.providerChain.GetContext().WithBlockTime(time.Now().Add(time.Hour)) - content = types.NewCreateConsumerChainProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()) - }, true, - }, - { - "valid stop consumerchain proposal", func(suite *ProviderTestSuite) { - ctx = suite.providerChain.GetContext().WithBlockTime(time.Now().Add(time.Hour)) - content, err = types.NewStopConsumerChainProposal("title", "description", "chainID", time.Now()) - suite.Require().NoError(err) - }, true, - }, - { - "nil proposal", func(suite *ProviderTestSuite) { - ctx = suite.providerChain.GetContext() - content = nil - }, false, - }, - { - "unsupported proposal type", func(suite *ProviderTestSuite) { - ctx = suite.providerChain.GetContext() - content = distributiontypes.NewCommunityPoolSpendProposal(ibctesting.Title, ibctesting.Description, suite.providerChain.SenderAccount.GetAddress(), sdk.NewCoins(sdk.NewCoin("communityfunds", sdk.NewInt(10)))) - }, false, - }, - } - - for _, tc := range testCases { - tc := tc - - suite.Run(tc.name, func() { - suite.SetupTest() // reset - - tc.malleate(suite) - - proposalHandler := provider.NewConsumerChainProposalHandler(suite.providerChain.App.(*appProvider.App).ProviderKeeper) - - err = proposalHandler(ctx, content) - - if tc.expPass { - suite.Require().NoError(err) - } else { - suite.Require().Error(err) - } - }) - } -} - -func (suite *ProviderKeeperTestSuite) TestMakeConsumerGenesis() { - suite.SetupTest() - - actualGenesis, err := 
suite.providerChain.App.(*appProvider.App).ProviderKeeper.MakeConsumerGenesis(suite.providerChain.GetContext()) - suite.Require().NoError(err) - - jsonString := `{"params":{"enabled":true, "blocks_per_distribution_transmission":1000, "lock_unbonding_on_timeout": false},"new_chain":true,"provider_client_state":{"chain_id":"testchain1","trust_level":{"numerator":1,"denominator":3},"trusting_period":907200000000000,"unbonding_period":1814400000000000,"max_clock_drift":10000000000,"frozen_height":{},"latest_height":{"revision_height":5},"proof_specs":[{"leaf_spec":{"hash":1,"prehash_value":1,"length":1,"prefix":"AA=="},"inner_spec":{"child_order":[0,1],"child_size":33,"min_prefix_length":4,"max_prefix_length":12,"hash":1}},{"leaf_spec":{"hash":1,"prehash_value":1,"length":1,"prefix":"AA=="},"inner_spec":{"child_order":[0,1],"child_size":32,"min_prefix_length":1,"max_prefix_length":1,"hash":1}}],"upgrade_path":["upgrade","upgradedIBCState"],"allow_update_after_expiry":true,"allow_update_after_misbehaviour":true},"provider_consensus_state":{"timestamp":"2020-01-02T00:00:10Z","root":{"hash":"LpGpeyQVLUo9HpdsgJr12NP2eCICspcULiWa5u9udOA="},"next_validators_hash":"E30CE736441FB9101FADDAF7E578ABBE6DFDB67207112350A9A904D554E1F5BE"},"unbonding_sequences":null,"initial_val_set":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"dcASx5/LIKZqagJWN0frOlFtcvz91frYmj/zmoZRWro="},"power":1}]}` - - var expectedGenesis consumertypes.GenesisState - err = json.Unmarshal([]byte(jsonString), &expectedGenesis) - suite.Require().NoError(err) - - // Zero out differing fields- TODO: figure out how to get the test suite to - // keep these deterministic - actualGenesis.ProviderConsensusState.NextValidatorsHash = []byte{} - expectedGenesis.ProviderConsensusState.NextValidatorsHash = []byte{} - - // set valset to one empty validator because SetupTest() creates 4 validators per chain - actualGenesis.InitialValSet = []abci.ValidatorUpdate{{PubKey: crypto.PublicKey{}, Power: actualGenesis.InitialValSet[0].Power}} - expectedGenesis.InitialValSet[0].PubKey = crypto.PublicKey{} - - actualGenesis.ProviderConsensusState.Root.Hash = []byte{} - expectedGenesis.ProviderConsensusState.Root.Hash = []byte{} - - suite.Require().Equal(actualGenesis, expectedGenesis, "consumer chain genesis created incorrectly") -} - -func (suite *ProviderKeeperTestSuite) TestCreateConsumerChainProposal() { - var ( - ctx sdk.Context - proposal *types.CreateConsumerChainProposal - ok bool - ) - - chainID := "chainID" - initialHeight := clienttypes.NewHeight(2, 3) - lockUbdOnTimeout := false - - testCases := []struct { - name string - malleate func(*ProviderKeeperTestSuite) - expPass bool - spawnReached bool - }{ - { - "valid create consumer chain proposal: spawn time reached", func(suite *ProviderKeeperTestSuite) { - // ctx blocktime is after proposal's spawn time - ctx = suite.providerChain.GetContext().WithBlockTime(time.Now().Add(time.Hour)) - content := types.NewCreateConsumerChainProposal("title", "description", chainID, initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()) - proposal, ok = content.(*types.CreateConsumerChainProposal) - suite.Require().True(ok) - proposal.LockUnbondingOnTimeout = lockUbdOnTimeout - }, true, true, - }, - { - "valid proposal: spawn time has not yet been reached", func(suite *ProviderKeeperTestSuite) { - // ctx blocktime is before proposal's spawn time - ctx = suite.providerChain.GetContext().WithBlockTime(time.Now()) - content := types.NewCreateConsumerChainProposal("title", "description", chainID, 
initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now().Add(time.Hour)) - proposal, ok = content.(*types.CreateConsumerChainProposal) - suite.Require().True(ok) - proposal.LockUnbondingOnTimeout = lockUbdOnTimeout - }, true, false, - }, - } - - for _, tc := range testCases { - tc := tc - - suite.Run(tc.name, func() { - suite.SetupTest() - - tc.malleate(suite) - - err := suite.providerChain.App.(*appProvider.App).ProviderKeeper.CreateConsumerChainProposal(ctx, proposal) - if tc.expPass { - suite.Require().NoError(err, "error returned on valid case") - if tc.spawnReached { - clientId, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerClientId(ctx, chainID) - suite.Require().True(found, "consumer client not found") - consumerGenesis, ok := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerGenesis(ctx, chainID) - suite.Require().True(ok) - - expectedGenesis, err := suite.providerChain.App.(*appProvider.App).ProviderKeeper.MakeConsumerGenesis(ctx) - suite.Require().NoError(err) - - suite.Require().Equal(expectedGenesis, consumerGenesis) - suite.Require().NotEqual("", clientId, "consumer client was not created after spawn time reached") - } else { - gotProposal := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetPendingCreateProposal(ctx, proposal.SpawnTime, chainID) - suite.Require().Equal(initialHeight, gotProposal.InitialHeight, "unexpected pending proposal (InitialHeight)") - suite.Require().Equal(lockUbdOnTimeout, gotProposal.LockUnbondingOnTimeout, "unexpected pending proposal (LockUnbondingOnTimeout)") - } - } else { - suite.Require().Error(err, "did not return error on invalid case") - } - }) - } -} diff --git a/tests/e2e/common_test.go b/tests/e2e/common_test.go index 97a0c8fdd0..396aa02ee3 100644 --- a/tests/e2e/common_test.go +++ b/tests/e2e/common_test.go @@ -32,19 +32,19 @@ const ( Consumer ) -func (s *ProviderTestSuite) providerCtx() sdk.Context { +func (s *CCVTestSuite) providerCtx() sdk.Context { return s.providerChain.GetContext() } -func (s *ProviderTestSuite) consumerCtx() sdk.Context { +func (s *CCVTestSuite) consumerCtx() sdk.Context { return s.consumerChain.GetContext() } -func (s *ProviderTestSuite) providerBondDenom() string { +func (s *CCVTestSuite) providerBondDenom() string { return s.providerChain.App.(*appProvider.App).StakingKeeper.BondDenom(s.providerCtx()) } -func (s *ProviderTestSuite) getVal(index int) (validator stakingtypes.Validator, valAddr sdk.ValAddress) { +func (s *CCVTestSuite) getVal(index int) (validator stakingtypes.Validator, valAddr sdk.ValAddress) { // Choose a validator, and get its address and data structure into the correct types tmValidator := s.providerChain.Vals.Validators[index] valAddr, err := sdk.ValAddressFromHex(tmValidator.Address.String()) @@ -55,13 +55,13 @@ func (s *ProviderTestSuite) getVal(index int) (validator stakingtypes.Validator, return validator, valAddr } -func getBalance(s *ProviderTestSuite, providerCtx sdk.Context, delAddr sdk.AccAddress) sdk.Int { +func getBalance(s *CCVTestSuite, providerCtx sdk.Context, delAddr sdk.AccAddress) sdk.Int { return s.providerChain.App.(*appProvider.App).BankKeeper.GetBalance(providerCtx, delAddr, s.providerBondDenom()).Amount } // delegateAndUndelegate delegates bondAmt from delAddr to the first validator // and then immediately undelegates 1/shareDiv of that delegation -func delegateAndUndelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, bondAmt sdk.Int, shareDiv int64) (initBalance sdk.Int, valsetUpdateId uint64) { 
+func delegateAndUndelegate(s *CCVTestSuite, delAddr sdk.AccAddress, bondAmt sdk.Int, shareDiv int64) (initBalance sdk.Int, valsetUpdateId uint64) { // delegate initBalance, shares, valAddr := delegate(s, delAddr, bondAmt) @@ -82,7 +82,7 @@ func delegateAndUndelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, bondAmt // // Note: This function advances blocks in-between operations, where validator powers are // not checked, since they are checked in integration tests. -func delegateAndRedelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, +func delegateAndRedelegate(s *CCVTestSuite, delAddr sdk.AccAddress, srcValAddr sdk.ValAddress, dstValAddr sdk.ValAddress, amount sdk.Int) { stakingKeeper := s.providerChain.App.(*appProvider.App).StakingKeeper @@ -115,7 +115,7 @@ func delegateAndRedelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, } // delegate delegates bondAmt to the first validator -func delegate(s *ProviderTestSuite, delAddr sdk.AccAddress, bondAmt sdk.Int) (initBalance sdk.Int, shares sdk.Dec, valAddr sdk.ValAddress) { +func delegate(s *CCVTestSuite, delAddr sdk.AccAddress, bondAmt sdk.Int) (initBalance sdk.Int, shares sdk.Dec, valAddr sdk.ValAddress) { initBalance = getBalance(s, s.providerCtx(), delAddr) // choose a validator validator, valAddr := s.getVal(0) @@ -135,7 +135,7 @@ func delegate(s *ProviderTestSuite, delAddr sdk.AccAddress, bondAmt sdk.Int) (in } // undelegate unbonds an amount of delegator shares from a given validator -func undelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, valAddr sdk.ValAddress, sharesAmount sdk.Dec) (valsetUpdateId uint64) { +func undelegate(s *CCVTestSuite, delAddr sdk.AccAddress, valAddr sdk.ValAddress, sharesAmount sdk.Dec) (valsetUpdateId uint64) { _, err := s.providerChain.App.(*appProvider.App).StakingKeeper.Undelegate(s.providerCtx(), delAddr, valAddr, sharesAmount) s.Require().NoError(err) @@ -147,7 +147,7 @@ func undelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, valAddr sdk.ValAdd // Executes a BeginRedelegation (unbonding and redelegation) operation // on the provider chain using delegated funds from delAddr -func redelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, valSrcAddr sdk.ValAddress, +func redelegate(s *CCVTestSuite, delAddr sdk.AccAddress, valSrcAddr sdk.ValAddress, ValDstAddr sdk.ValAddress, sharesAmount sdk.Dec) { ctx := s.providerCtx() @@ -179,7 +179,7 @@ func redelegate(s *ProviderTestSuite, delAddr sdk.AccAddress, valSrcAddr sdk.Val // relayAllCommittedPackets relays all committed packets from `srcChain` on `path` func relayAllCommittedPackets( - s *ProviderTestSuite, + s *CCVTestSuite, srcChain *ibctesting.TestChain, path *ibctesting.Path, portID string, @@ -212,7 +212,7 @@ func relayAllCommittedPackets( // // Note that it is expected for the provider unbonding period // to be one day larger than the consumer unbonding period. 
-func incrementTimeByUnbondingPeriod(s *ProviderTestSuite, chainType ChainType) { +func incrementTimeByUnbondingPeriod(s *CCVTestSuite, chainType ChainType) { // Get unbonding period from staking keeper providerUnbondingPeriod := s.providerChain.App.GetStakingKeeper().UnbondingTime(s.providerCtx()) consumerUnbondingPeriod, found := s.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetUnbondingTime(s.consumerCtx()) @@ -239,13 +239,13 @@ func incrementTimeByUnbondingPeriod(s *ProviderTestSuite, chainType ChainType) { } } -func checkStakingUnbondingOps(s *ProviderTestSuite, id uint64, found bool, onHold bool) { +func checkStakingUnbondingOps(s *CCVTestSuite, id uint64, found bool, onHold bool) { stakingUnbondingOp, wasFound := getStakingUnbondingDelegationEntry(s.providerCtx(), s.providerChain.App.(*appProvider.App).StakingKeeper, id) s.Require().True(found == wasFound) s.Require().True(onHold == (0 < stakingUnbondingOp.UnbondingOnHoldRefCount)) } -func checkCCVUnbondingOp(s *ProviderTestSuite, providerCtx sdk.Context, chainID string, valUpdateID uint64, found bool) { +func checkCCVUnbondingOp(s *CCVTestSuite, providerCtx sdk.Context, chainID string, valUpdateID uint64, found bool) { entries, wasFound := s.providerChain.App.(*appProvider.App).ProviderKeeper.GetUnbondingOpsFromIndex(providerCtx, chainID, valUpdateID) s.Require().True(found == wasFound) if found { @@ -257,7 +257,7 @@ func checkCCVUnbondingOp(s *ProviderTestSuite, providerCtx sdk.Context, chainID // Checks that an expected amount of redelegations exist for a delegator // via the staking keeper, then returns those redelegations. -func checkRedelegations(s *ProviderTestSuite, delAddr sdk.AccAddress, +func checkRedelegations(s *CCVTestSuite, delAddr sdk.AccAddress, expect uint16) []stakingtypes.Redelegation { redelegations := s.providerChain.App.(*appProvider.App).StakingKeeper. @@ -269,7 +269,7 @@ func checkRedelegations(s *ProviderTestSuite, delAddr sdk.AccAddress, // Checks that a redelegation entry has a completion time equal to an expected time func checkRedelegationEntryCompletionTime( - s *ProviderTestSuite, entry stakingtypes.RedelegationEntry, expectedCompletion time.Time) { + s *CCVTestSuite, entry stakingtypes.RedelegationEntry, expectedCompletion time.Time) { s.Require().Equal(expectedCompletion, entry.CompletionTime) } @@ -289,7 +289,7 @@ func getStakingUnbondingDelegationEntry(ctx sdk.Context, k stakingkeeper.Keeper, // SendEmptyVSCPacket sends a VSC packet without any changes // to ensure that the channel gets established -func (suite *ConsumerKeeperTestSuite) SendEmptyVSCPacket() { +func (suite *CCVTestSuite) SendEmptyVSCPacket() { providerKeeper := suite.providerChain.App.(*appProvider.App).ProviderKeeper oldBlockTime := suite.providerChain.GetContext().BlockTime() @@ -318,7 +318,7 @@ func (suite *ConsumerKeeperTestSuite) SendEmptyVSCPacket() { // commitSlashPacket returns a commit hash for the given slash packet data // Note that it must be called before sending the embedding IBC packet.
-func (suite *ConsumerKeeperTestSuite) commitSlashPacket(ctx sdk.Context, packetData ccv.SlashPacketData) []byte { +func (suite *CCVTestSuite) commitSlashPacket(ctx sdk.Context, packetData ccv.SlashPacketData) []byte { oldBlockTime := ctx.BlockTime() timeout := uint64(ccv.GetTimeoutTimestamp(oldBlockTime).UnixNano()) @@ -329,7 +329,7 @@ func (suite *ConsumerKeeperTestSuite) commitSlashPacket(ctx sdk.Context, packetD } // incrementTimeBy increments the overall time by jumpPeriod -func incrementTimeBy(s *ConsumerKeeperTestSuite, jumpPeriod time.Duration) { +func incrementTimeBy(s *CCVTestSuite, jumpPeriod time.Duration) { // Get unbonding period from staking keeper consumerUnbondingPeriod, found := s.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetUnbondingTime(s.consumerChain.GetContext()) s.Require().True(found) @@ -356,7 +356,7 @@ func incrementTimeBy(s *ConsumerKeeperTestSuite, jumpPeriod time.Duration) { // using the given unbonding period. // It will update the clientID for the endpoint if the message // is successfully executed. -func (suite *ConsumerKeeperTestSuite) CreateCustomClient(endpoint *ibctesting.Endpoint, unbondingPeriod time.Duration) { +func (suite *CCVTestSuite) CreateCustomClient(endpoint *ibctesting.Endpoint, unbondingPeriod time.Duration) { // ensure counterparty has committed state endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain) @@ -386,42 +386,3 @@ func (suite *ConsumerKeeperTestSuite) CreateCustomClient(endpoint *ibctesting.En endpoint.ClientID, err = ibctesting.ParseClientIDFromEvents(res.GetEvents()) require.NoError(endpoint.Chain.T, err) } - -// createCustomClient creates an IBC client on the endpoint -// using the given unbonding period. -// It will update the clientID for the endpoint if the message -// is successfully executed.
-func (suite *ConsumerTestSuite) createCustomClient(endpoint *ibctesting.Endpoint, unbondingPeriod time.Duration) (err error) { - // ensure counterparty has committed state - endpoint.Chain.Coordinator.CommitBlock(endpoint.Counterparty.Chain) - - suite.Require().Equal(exported.Tendermint, endpoint.ClientConfig.GetClientType(), "only Tendermint client supported") - - tmConfig, ok := endpoint.ClientConfig.(*ibctesting.TendermintConfig) - require.True(endpoint.Chain.T, ok) - tmConfig.UnbondingPeriod = unbondingPeriod - tmConfig.TrustingPeriod = unbondingPeriod / utils.TrustingPeriodFraction - - height := endpoint.Counterparty.Chain.LastHeader.GetHeight().(clienttypes.Height) - UpgradePath := []string{"upgrade", "upgradedIBCState"} - clientState := ibctmtypes.NewClientState( - endpoint.Counterparty.Chain.ChainID, tmConfig.TrustLevel, tmConfig.TrustingPeriod, tmConfig.UnbondingPeriod, tmConfig.MaxClockDrift, - height, commitmenttypes.GetSDKSpecs(), UpgradePath, tmConfig.AllowUpdateAfterExpiry, tmConfig.AllowUpdateAfterMisbehaviour, - ) - consensusState := endpoint.Counterparty.Chain.LastHeader.ConsensusState() - - msg, err := clienttypes.NewMsgCreateClient( - clientState, consensusState, endpoint.Chain.SenderAccount.GetAddress().String(), - ) - require.NoError(endpoint.Chain.T, err) - - res, err := endpoint.Chain.SendMsgs(msg) - if err != nil { - return err - } - - endpoint.ClientID, err = ibctesting.ParseClientIDFromEvents(res.GetEvents()) - require.NoError(endpoint.Chain.T, err) - - return nil -} diff --git a/tests/e2e/democracy_test.go b/tests/e2e/democracy_test.go new file mode 100644 index 0000000000..f399e04627 --- /dev/null +++ b/tests/e2e/democracy_test.go @@ -0,0 +1,262 @@ +package e2e_test + +import ( + "bytes" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + transfertypes "github.com/cosmos/ibc-go/v3/modules/apps/transfer/types" + + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" + channeltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types" + ibctesting "github.com/cosmos/ibc-go/v3/testing" + + appConsumer "github.com/cosmos/interchain-security/app/consumer-democracy" + appProvider "github.com/cosmos/interchain-security/app/provider" + "github.com/cosmos/interchain-security/testutil/simapp" + consumerkeeper "github.com/cosmos/interchain-security/x/ccv/consumer/keeper" + consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/cosmos/interchain-security/x/ccv/utils" + + tmtypes "github.com/tendermint/tendermint/types" + + "github.com/stretchr/testify/suite" +) + +var consumerFraction, _ = sdk.NewDecFromStr(consumerkeeper.ConsumerRedistributeFrac) + +type ConsumerDemocracyTestSuite struct { + suite.Suite + + coordinator *ibctesting.Coordinator + + // testing chains + providerChain *ibctesting.TestChain + consumerChain *ibctesting.TestChain + + path *ibctesting.Path + transferPath *ibctesting.Path +} + +func (s *ConsumerDemocracyTestSuite) SetupTest() { + s.coordinator, s.providerChain, s.consumerChain = simapp.NewProviderConsumerDemocracyCoordinator(s.T()) + + // valsets must match + providerValUpdates := tmtypes.TM2PB.ValidatorUpdates(s.providerChain.Vals) + consumerValUpdates := tmtypes.TM2PB.ValidatorUpdates(s.consumerChain.Vals) + s.Require().True(len(providerValUpdates) == len(consumerValUpdates), "initial valset not matching") + for i := 0; i < len(providerValUpdates); i++ { + addr1 := utils.GetChangePubKeyAddress(providerValUpdates[i]) + addr2 := 
utils.GetChangePubKeyAddress(consumerValUpdates[i]) + s.Require().True(bytes.Equal(addr1, addr2), "validator mismatch") + } + + // move both chains to the next block + s.providerChain.NextBlock() + s.consumerChain.NextBlock() + + // create consumer client on provider chain and set as consumer client for consumer chainID in provider keeper. + err := s.providerChain.App.(*appProvider.App).ProviderKeeper.CreateConsumerClient( + s.providerCtx(), + s.consumerChain.ChainID, + s.consumerChain.LastHeader.GetHeight().(clienttypes.Height), + false, + ) + s.Require().NoError(err) + + // move provider to next block to commit the state + s.providerChain.NextBlock() + + // initialize the consumer chain with the genesis state stored on the provider + consumerGenesis, found := s.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerGenesis( + s.providerCtx(), + s.consumerChain.ChainID, + ) + s.Require().True(found, "consumer genesis not found") + s.consumerChain.App.(*appConsumer.App).ConsumerKeeper.InitGenesis(s.consumerChain.GetContext(), &consumerGenesis) + + // create path for the CCV channel + s.path = ibctesting.NewPath(s.consumerChain, s.providerChain) + + // update CCV path with correct info + // - set provider endpoint's clientID + consumerClient, found := s.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerClientId( + s.providerCtx(), + s.consumerChain.ChainID, + ) + s.Require().True(found, "consumer client not found") + s.path.EndpointB.ClientID = consumerClient + // - set consumer endpoint's clientID + providerClient, found := s.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderClientID(s.consumerChain.GetContext()) + s.Require().True(found, "provider client not found") + s.path.EndpointA.ClientID = providerClient + // - client config + providerUnbondingPeriod := s.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(s.providerCtx()) + s.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = providerUnbondingPeriod + s.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = providerUnbondingPeriod / utils.TrustingPeriodFraction + consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) + s.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = consumerUnbondingPeriod + s.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = consumerUnbondingPeriod / utils.TrustingPeriodFraction + // - channel config + s.path.EndpointA.ChannelConfig.PortID = types.ConsumerPortID + s.path.EndpointB.ChannelConfig.PortID = types.ProviderPortID + s.path.EndpointA.ChannelConfig.Version = types.Version + s.path.EndpointB.ChannelConfig.Version = types.Version + s.path.EndpointA.ChannelConfig.Order = channeltypes.ORDERED + s.path.EndpointB.ChannelConfig.Order = channeltypes.ORDERED + + // set chains sender account number + // TODO: to be fixed in #151 + err = s.path.EndpointB.Chain.SenderAccount.SetAccountNumber(6) + s.Require().NoError(err) + err = s.path.EndpointA.Chain.SenderAccount.SetAccountNumber(0) + s.Require().NoError(err) + + // create path for the transfer channel + s.transferPath = ibctesting.NewPath(s.consumerChain, s.providerChain) + s.transferPath.EndpointA.ChannelConfig.PortID = transfertypes.PortID + s.transferPath.EndpointB.ChannelConfig.PortID = transfertypes.PortID + s.transferPath.EndpointA.ChannelConfig.Version = transfertypes.Version + s.transferPath.EndpointB.ChannelConfig.Version = transfertypes.Version +} + +func (s 
*ConsumerDemocracyTestSuite) SetupCCVChannel() { + s.StartSetupCCVChannel() + s.CompleteSetupCCVChannel() + s.SetupTransferChannel() +} + +func (s *ConsumerDemocracyTestSuite) StartSetupCCVChannel() { + s.coordinator.CreateConnections(s.path) + + err := s.path.EndpointA.ChanOpenInit() + s.Require().NoError(err) + + err = s.path.EndpointB.ChanOpenTry() + s.Require().NoError(err) +} + +func (s *ConsumerDemocracyTestSuite) CompleteSetupCCVChannel() { + err := s.path.EndpointA.ChanOpenAck() + s.Require().NoError(err) + + err = s.path.EndpointB.ChanOpenConfirm() + s.Require().NoError(err) + + // ensure counterparty is up to date + err = s.path.EndpointA.UpdateClient() + s.Require().NoError(err) +} + +func (s *ConsumerDemocracyTestSuite) SetupTransferChannel() { + // transfer path will use the same connection as ccv path + + s.transferPath.EndpointA.ClientID = s.path.EndpointA.ClientID + s.transferPath.EndpointA.ConnectionID = s.path.EndpointA.ConnectionID + s.transferPath.EndpointB.ClientID = s.path.EndpointB.ClientID + s.transferPath.EndpointB.ConnectionID = s.path.EndpointB.ConnectionID + + // CCV channel handshake will automatically initiate transfer channel handshake on ACK + // so transfer channel will be on stage INIT when CompleteSetupCCVChannel returns. + s.transferPath.EndpointA.ChannelID = s.consumerChain.App.(*appConsumer.App). + ConsumerKeeper.GetDistributionTransmissionChannel(s.consumerChain.GetContext()) + + // Complete TRY, ACK, CONFIRM for transfer path + err := s.transferPath.EndpointB.ChanOpenTry() + s.Require().NoError(err) + + err = s.transferPath.EndpointA.ChanOpenAck() + s.Require().NoError(err) + + err = s.transferPath.EndpointB.ChanOpenConfirm() + s.Require().NoError(err) + + // ensure counterparty is up to date + err = s.transferPath.EndpointA.UpdateClient() + s.Require().NoError(err) +} + +func TestConsumerDemocracyTestSuite(t *testing.T) { + suite.Run(t, new(ConsumerDemocracyTestSuite)) +} + +func (s *ConsumerDemocracyTestSuite) TestDemocracyRewardsDistribution() { + + s.consumerChain.NextBlock() + stakingKeeper := s.consumerChain.App.(*appConsumer.App).StakingKeeper + authKeeper := s.consumerChain.App.(*appConsumer.App).AccountKeeper + distrKeeper := s.consumerChain.App.(*appConsumer.App).DistrKeeper + bankKeeper := s.consumerChain.App.(*appConsumer.App).BankKeeper + bondDenom := stakingKeeper.BondDenom(s.consumerCtx()) + + currentRepresentativesRewards := map[string]sdk.Dec{} + nextRepresentativesRewards := map[string]sdk.Dec{} + representativesTokens := map[string]sdk.Int{} + + for _, representative := range stakingKeeper.GetAllValidators(s.consumerCtx()) { + currentRepresentativesRewards[representative.OperatorAddress] = sdk.NewDec(0) + nextRepresentativesRewards[representative.OperatorAddress] = sdk.NewDec(0) + representativesTokens[representative.OperatorAddress] = representative.GetTokens() + } + + distrModuleAccount := distrKeeper.GetDistributionAccount(s.consumerCtx()) + providerRedistributeAccount := authKeeper.GetModuleAccount(s.consumerCtx(), consumertypes.ConsumerToSendToProviderName) + //balance of consumer redistribute address will always be 0 when checked between 2 NextBlock() calls + + currentDistrModuleAccountBalance := sdk.NewDecFromInt(bankKeeper.GetBalance(s.consumerCtx(), distrModuleAccount.GetAddress(), bondDenom).Amount) + currentProviderFeeAccountBalance := sdk.NewDecFromInt(bankKeeper.GetBalance(s.consumerCtx(), providerRedistributeAccount.GetAddress(), bondDenom).Amount) + currentCommunityPoolBalance :=
distrKeeper.GetFeePoolCommunityCoins(s.consumerCtx()).AmountOf(bondDenom) + for key := range currentRepresentativesRewards { + representativeAddr, _ := sdk.ValAddressFromBech32(key) + representativeReward := distrKeeper.GetValidatorOutstandingRewards(s.consumerCtx(), representativeAddr).Rewards.AmountOf(bondDenom) + currentRepresentativesRewards[key] = representativeReward + } + + s.consumerChain.NextBlock() + + nextDistrModuleAccountBalance := sdk.NewDecFromInt(bankKeeper.GetBalance(s.consumerCtx(), distrModuleAccount.GetAddress(), bondDenom).Amount) + nextProviderFeeAccountBalance := sdk.NewDecFromInt(bankKeeper.GetBalance(s.consumerCtx(), providerRedistributeAccount.GetAddress(), bondDenom).Amount) + nextCommunityPoolBalance := distrKeeper.GetFeePoolCommunityCoins(s.consumerCtx()).AmountOf(bondDenom) + for key := range nextRepresentativesRewards { + representativeAddr, _ := sdk.ValAddressFromBech32(key) + representativeReward := distrKeeper.GetValidatorOutstandingRewards(s.consumerCtx(), representativeAddr).Rewards.AmountOf(bondDenom) + nextRepresentativesRewards[key] = representativeReward + } + + distrModuleDifference := nextDistrModuleAccountBalance.Sub(currentDistrModuleAccountBalance) + providerDifference := nextProviderFeeAccountBalance.Sub(currentProviderFeeAccountBalance) + communityPoolDifference := nextCommunityPoolBalance.Sub(currentCommunityPoolBalance) + representativeDifference := map[string]sdk.Dec{} + consumerRedistributeDifference := communityPoolDifference + + for key, currentReward := range currentRepresentativesRewards { + representativeDifference[key] = nextRepresentativesRewards[key].Sub(currentReward) + consumerRedistributeDifference = consumerRedistributeDifference.Add(representativeDifference[key]) + } + + //confirm that the total amount given to the community pool plus all representatives is equal to the total amount taken out of distribution + s.Require().Equal(distrModuleDifference, consumerRedistributeDifference) + //confirm that the percentage given to the community pool is equal to the configured community tax percentage. + s.Require().Equal(communityPoolDifference.Quo(consumerRedistributeDifference), distrKeeper.GetCommunityTax(s.consumerCtx())) + //check that the fraction actually kept by the consumer is the correct fraction. using InEpsilon because the math code uses truncations + s.Require().InEpsilon(distrModuleDifference.Quo(providerDifference.Add(distrModuleDifference)).MustFloat64(), consumerFraction.MustFloat64(), float64(0.0001)) + //check that the fraction actually kept by the provider is the correct fraction. 
using InEpsilon because the math code uses truncations + s.Require().InEpsilon(providerDifference.Quo(providerDifference.Add(distrModuleDifference)).MustFloat64(), sdk.NewDec(1).Sub(consumerFraction).MustFloat64(), float64(0.0001)) + + totalRepresentativePower := stakingKeeper.GetValidatorSet().TotalBondedTokens(s.consumerCtx()) + + //check that each representative has gotten the correct amount of rewards + for key, representativeTokens := range representativesTokens { + powerFraction := sdk.NewDecFromInt(representativeTokens).QuoTruncate(sdk.NewDecFromInt(totalRepresentativePower)) + s.Require().Equal(powerFraction, representativeDifference[key].Quo(consumerRedistributeDifference.Sub(communityPoolDifference))) + } +} + +func (s *ConsumerDemocracyTestSuite) providerCtx() sdk.Context { + return s.providerChain.GetContext() +} + +func (s *ConsumerDemocracyTestSuite) consumerCtx() sdk.Context { + return s.consumerChain.GetContext() +} diff --git a/tests/e2e/distribution_test.go b/tests/e2e/distribution_test.go index 32ccb9824b..5eb7b12b77 100644 --- a/tests/e2e/distribution_test.go +++ b/tests/e2e/distribution_test.go @@ -14,10 +14,11 @@ import ( ) //This test is valid for minimal viable consumer chain -func (s *ProviderTestSuite) TestRewardsDistribution() { +func (s *CCVTestSuite) TestRewardsDistribution() { //set up channel and delegate some tokens in order for validator set update to be sent to the consumer chain s.SetupCCVChannel() + s.SetupTransferChannel() bondAmt := sdk.NewInt(10000000) delAddr := s.providerChain.SenderAccount.GetAddress() delegate(s, delAddr, bondAmt) diff --git a/tests/e2e/normal_operations_test.go b/tests/e2e/normal_operations_test.go index 4acd881563..1c90efaf50 100644 --- a/tests/e2e/normal_operations_test.go +++ b/tests/e2e/normal_operations_test.go @@ -8,7 +8,7 @@ import ( ) // Tests the tracking of historical info in the context of new blocks being committed -func (k ConsumerKeeperTestSuite) TestTrackHistoricalInfo() { +func (k CCVTestSuite) TestTrackHistoricalInfo() { consumerKeeper := k.consumerChain.App.(*appConsumer.App).ConsumerKeeper cCtx := k.consumerChain.GetContext @@ -19,7 +19,7 @@ func (k ConsumerKeeperTestSuite) TestTrackHistoricalInfo() { // define an utility function that creates a new cross-chain validator // and then call track historical info in the next block - createVal := func(k ConsumerKeeperTestSuite) { + createVal := func(k CCVTestSuite) { // add new validator to consumer states pk := ed25519.GenPrivKey().PubKey() cVal, err := types.NewCCValidator(pk.Address(), int64(1), pk) @@ -35,10 +35,10 @@ func (k ConsumerKeeperTestSuite) TestTrackHistoricalInfo() { // increased by HistoricalEntries in order to prune the historical info less or equal to the current block height // Note that historical info containing the created validators are stored during the next block BeginBlocker // and thus are indexed with the respective block heights InitHeight+1 and InitHeight+2 - testSetup := []func(ConsumerKeeperTestSuite){ + testSetup := []func(CCVTestSuite){ createVal, createVal, - func(k ConsumerKeeperTestSuite) { + func(k CCVTestSuite) { newHeight := k.consumerChain.GetContext().BlockHeight() + int64(types.HistoricalEntries) header := tmproto.Header{ ChainID: "HelloChain", diff --git a/tests/e2e/setup_test.go b/tests/e2e/setup_test.go index 5e06f0cd8a..3fd0522729 100644 --- a/tests/e2e/setup_test.go +++ b/tests/e2e/setup_test.go @@ -6,7 +6,6 @@ import ( "bytes" "testing" - sdk "github.com/cosmos/cosmos-sdk/types" ccv 
"github.com/cosmos/interchain-security/x/ccv/types" "github.com/cosmos/interchain-security/x/ccv/utils" @@ -24,24 +23,22 @@ import ( "github.com/stretchr/testify/suite" ) -type ProviderTestSuite struct { +type CCVTestSuite struct { suite.Suite - - coordinator *ibctesting.Coordinator - - // testing chains - providerChain *ibctesting.TestChain - consumerChain *ibctesting.TestChain - - path *ibctesting.Path - transferPath *ibctesting.Path + coordinator *ibctesting.Coordinator + providerChain *ibctesting.TestChain + consumerChain *ibctesting.TestChain + providerClient *ibctmtypes.ClientState + providerConsState *ibctmtypes.ConsensusState + path *ibctesting.Path + transferPath *ibctesting.Path } -func TestProviderTestSuite(t *testing.T) { - suite.Run(t, new(ProviderTestSuite)) +func TestCCVTestSuite(t *testing.T) { + suite.Run(t, new(CCVTestSuite)) } -func (suite *ProviderTestSuite) SetupTest() { +func (suite *CCVTestSuite) SetupTest() { suite.coordinator, suite.providerChain, suite.consumerChain = simapp.NewProviderConsumerCoordinator(suite.T()) // valsets must match @@ -76,6 +73,8 @@ func (suite *ProviderTestSuite) SetupTest() { ) suite.Require().True(found, "consumer genesis not found") suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.InitGenesis(suite.consumerChain.GetContext(), &consumerGenesis) + suite.providerClient = consumerGenesis.ProviderClientState + suite.providerConsState = consumerGenesis.ProviderConsensusState // create path for the CCV channel suite.path = ibctesting.NewPath(suite.consumerChain, suite.providerChain) @@ -86,6 +85,7 @@ func (suite *ProviderTestSuite) SetupTest() { suite.providerCtx(), suite.consumerChain.ChainID, ) + suite.Require().True(found, "consumer client not found") suite.path.EndpointB.ClientID = consumerClient // - set consumer endpoint's clientID @@ -122,13 +122,12 @@ func (suite *ProviderTestSuite) SetupTest() { suite.transferPath.EndpointB.ChannelConfig.Version = transfertypes.Version } -func (suite *ProviderTestSuite) SetupCCVChannel() { +func (suite *CCVTestSuite) SetupCCVChannel() { suite.StartSetupCCVChannel() suite.CompleteSetupCCVChannel() - suite.SetupTransferChannel() } -func (suite *ProviderTestSuite) StartSetupCCVChannel() { +func (suite *CCVTestSuite) StartSetupCCVChannel() { suite.coordinator.CreateConnections(suite.path) err := suite.path.EndpointA.ChanOpenInit() @@ -138,7 +137,7 @@ func (suite *ProviderTestSuite) StartSetupCCVChannel() { suite.Require().NoError(err) } -func (suite *ProviderTestSuite) CompleteSetupCCVChannel() { +func (suite *CCVTestSuite) CompleteSetupCCVChannel() { err := suite.path.EndpointA.ChanOpenAck() suite.Require().NoError(err) @@ -150,7 +149,7 @@ func (suite *ProviderTestSuite) CompleteSetupCCVChannel() { suite.Require().NoError(err) } -func (suite *ProviderTestSuite) SetupTransferChannel() { +func (suite *CCVTestSuite) SetupTransferChannel() { // transfer path will use the same connection as ccv path suite.transferPath.EndpointA.ClientID = suite.path.EndpointA.ClientID @@ -177,296 +176,3 @@ func (suite *ProviderTestSuite) SetupTransferChannel() { err = suite.transferPath.EndpointA.UpdateClient() suite.Require().NoError(err) } - -// TODO: Can this be consolidated with ProviderTestSuite above? 
-type ProviderKeeperTestSuite struct { - suite.Suite - coordinator *ibctesting.Coordinator - - // testing chains - providerChain *ibctesting.TestChain - consumerChain *ibctesting.TestChain - path *ibctesting.Path - ctx sdk.Context -} - -func TestProviderKeeperTestSuite(t *testing.T) { - suite.Run(t, new(ProviderKeeperTestSuite)) -} - -func (suite *ProviderKeeperTestSuite) SetupTest() { - suite.coordinator, suite.providerChain, suite.consumerChain = simapp.NewProviderConsumerCoordinator(suite.T()) - - // valsets must match - providerValUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.providerChain.Vals) - consumerValUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.consumerChain.Vals) - suite.Require().True(len(providerValUpdates) == len(consumerValUpdates), "initial valset not matching") - for i := 0; i < len(providerValUpdates); i++ { - addr1 := utils.GetChangePubKeyAddress(providerValUpdates[i]) - addr2 := utils.GetChangePubKeyAddress(consumerValUpdates[i]) - suite.Require().True(bytes.Equal(addr1, addr2), "validator mismatch") - } - - // move both chains to the next block - suite.providerChain.NextBlock() - suite.consumerChain.NextBlock() - - // create consumer client on provider chain and set as consumer client for consumer chainID in provider keeper. - err := suite.providerChain.App.(*appProvider.App).ProviderKeeper.CreateConsumerClient( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - suite.consumerChain.LastHeader.GetHeight().(clienttypes.Height), - false, - ) - suite.Require().NoError(err) - // move provider to next block to commit the state - suite.providerChain.NextBlock() - - // initialize the consumer chain with the genesis state stored on the provider - consumerGenesis, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerGenesis( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - ) - suite.Require().True(found, "consumer genesis not found") - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.InitGenesis(suite.consumerChain.GetContext(), &consumerGenesis) - - // create path for the CCV channel - suite.path = ibctesting.NewPath(suite.consumerChain, suite.providerChain) - - // update CCV path with correct info - // - set provider endpoint's clientID - consumerClient, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerClientId( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - ) - suite.Require().True(found, "consumer client not found") - suite.path.EndpointB.ClientID = consumerClient - // - set consumer endpoint's clientID - providerClient, found := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderClientID(suite.consumerChain.GetContext()) - suite.Require().True(found, "provider client not found") - suite.path.EndpointA.ClientID = providerClient - // - client config - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - suite.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = providerUnbondingPeriod - suite.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = providerUnbondingPeriod / utils.TrustingPeriodFraction - consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - suite.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = consumerUnbondingPeriod - suite.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = consumerUnbondingPeriod 
/ utils.TrustingPeriodFraction - // - channel config - suite.path.EndpointA.ChannelConfig.PortID = ccv.ConsumerPortID - suite.path.EndpointB.ChannelConfig.PortID = ccv.ProviderPortID - suite.path.EndpointA.ChannelConfig.Version = ccv.Version - suite.path.EndpointB.ChannelConfig.Version = ccv.Version - suite.path.EndpointA.ChannelConfig.Order = channeltypes.ORDERED - suite.path.EndpointB.ChannelConfig.Order = channeltypes.ORDERED - - // set chains sender account number - // TODO: to be fixed in #151 - err = suite.path.EndpointB.Chain.SenderAccount.SetAccountNumber(6) - suite.Require().NoError(err) - err = suite.path.EndpointA.Chain.SenderAccount.SetAccountNumber(1) - suite.Require().NoError(err) - - suite.ctx = suite.providerChain.GetContext() -} - -type ConsumerTestSuite struct { - suite.Suite - - coordinator *ibctesting.Coordinator - - // testing chains - providerChain *ibctesting.TestChain - consumerChain *ibctesting.TestChain - - path *ibctesting.Path - - ctx sdk.Context -} - -func TestConsumerTestSuite(t *testing.T) { - suite.Run(t, new(ConsumerTestSuite)) -} - -func (suite *ConsumerTestSuite) SetupTest() { - suite.coordinator, suite.providerChain, suite.consumerChain = simapp.NewProviderConsumerCoordinator(suite.T()) - - // valsets must match - providerValUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.providerChain.Vals) - consumerValUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.consumerChain.Vals) - suite.Require().True(len(providerValUpdates) == len(consumerValUpdates), "initial valset not matching") - for i := 0; i < len(providerValUpdates); i++ { - addr1 := utils.GetChangePubKeyAddress(providerValUpdates[i]) - addr2 := utils.GetChangePubKeyAddress(consumerValUpdates[i]) - suite.Require().True(bytes.Equal(addr1, addr2), "validator mismatch") - } - - // move both chains to the next block - suite.providerChain.NextBlock() - suite.consumerChain.NextBlock() - - // create consumer client on provider chain and set as consumer client for consumer chainID in provider keeper. 
- err := suite.providerChain.App.(*appProvider.App).ProviderKeeper.CreateConsumerClient( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - suite.consumerChain.LastHeader.GetHeight().(clienttypes.Height), - false, - ) - suite.Require().NoError(err) - // move provider to next block to commit the state - suite.providerChain.NextBlock() - - // initialize the consumer chain with the genesis state stored on the provider - consumerGenesis, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerGenesis( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - ) - suite.Require().True(found, "consumer genesis not found") - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.InitGenesis(suite.consumerChain.GetContext(), &consumerGenesis) - - // create path for the CCV channel - suite.path = ibctesting.NewPath(suite.consumerChain, suite.providerChain) - - // update CCV path with correct info - // - set provider endpoint's clientID - consumerClient, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerClientId( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - ) - suite.Require().True(found, "consumer client not found") - suite.path.EndpointB.ClientID = consumerClient - // - set consumer endpoint's clientID - providerClient, found := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderClientID(suite.consumerChain.GetContext()) - suite.Require().True(found, "provider client not found") - suite.path.EndpointA.ClientID = providerClient - // - client config - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - suite.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = providerUnbondingPeriod - suite.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = providerUnbondingPeriod / utils.TrustingPeriodFraction - consumerUnbondingPeriod := utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - suite.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = consumerUnbondingPeriod - suite.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = consumerUnbondingPeriod / utils.TrustingPeriodFraction - // - channel config - suite.path.EndpointA.ChannelConfig.PortID = ccv.ConsumerPortID - suite.path.EndpointB.ChannelConfig.PortID = ccv.ProviderPortID - suite.path.EndpointA.ChannelConfig.Version = ccv.Version - suite.path.EndpointB.ChannelConfig.Version = ccv.Version - suite.path.EndpointA.ChannelConfig.Order = channeltypes.ORDERED - suite.path.EndpointB.ChannelConfig.Order = channeltypes.ORDERED - - // set chains sender account number - // TODO: to be fixed in #151 - err = suite.path.EndpointB.Chain.SenderAccount.SetAccountNumber(6) - suite.Require().NoError(err) - err = suite.path.EndpointA.Chain.SenderAccount.SetAccountNumber(1) - suite.Require().NoError(err) - - suite.ctx = suite.consumerChain.GetContext() - - suite.coordinator.CreateConnections(suite.path) -} - -// TODO: Can this be consolidated with ConsumerTestSuite above? 
-type ConsumerKeeperTestSuite struct { - suite.Suite - - coordinator *ibctesting.Coordinator - - // testing chains - providerChain *ibctesting.TestChain - consumerChain *ibctesting.TestChain - - providerClient *ibctmtypes.ClientState - providerConsState *ibctmtypes.ConsensusState - - path *ibctesting.Path - - ctx sdk.Context -} - -func TestConsumerKeeperTestSuite(t *testing.T) { - suite.Run(t, new(ConsumerKeeperTestSuite)) -} - -func (suite *ConsumerKeeperTestSuite) SetupTest() { - suite.coordinator, suite.providerChain, suite.consumerChain = simapp.NewProviderConsumerCoordinator(suite.T()) - - // valsets must match - providerValUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.providerChain.Vals) - consumerValUpdates := tmtypes.TM2PB.ValidatorUpdates(suite.consumerChain.Vals) - suite.Require().True(len(providerValUpdates) == len(consumerValUpdates), "initial valset not matching") - for i := 0; i < len(providerValUpdates); i++ { - addr1 := utils.GetChangePubKeyAddress(providerValUpdates[i]) - addr2 := utils.GetChangePubKeyAddress(consumerValUpdates[i]) - suite.Require().True(bytes.Equal(addr1, addr2), "validator mismatch") - } - - // move both chains to the next block - suite.providerChain.NextBlock() - suite.consumerChain.NextBlock() - - // create consumer client on provider chain and set as consumer client for consumer chainID in provider keeper. - err := suite.providerChain.App.(*appProvider.App).ProviderKeeper.CreateConsumerClient( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - suite.consumerChain.LastHeader.GetHeight().(clienttypes.Height), - false, - ) - suite.Require().NoError(err) - // move provider to next block to commit the state - suite.providerChain.NextBlock() - - // initialize the consumer chain with the genesis state stored on the provider - consumerGenesis, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerGenesis( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - ) - suite.Require().True(found, "consumer genesis not found") - suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.InitGenesis(suite.consumerChain.GetContext(), &consumerGenesis) - suite.providerClient = consumerGenesis.ProviderClientState - suite.providerConsState = consumerGenesis.ProviderConsensusState - - // create path for the CCV channel - suite.path = ibctesting.NewPath(suite.consumerChain, suite.providerChain) - - // update CCV path with correct info - // - set provider endpoint's clientID - consumerClient, found := suite.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerClientId( - suite.providerChain.GetContext(), - suite.consumerChain.ChainID, - ) - suite.Require().True(found, "consumer client not found") - suite.path.EndpointB.ClientID = consumerClient - // - set consumer endpoint's clientID - providerClient, found := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderClientID(suite.consumerChain.GetContext()) - suite.Require().True(found, "provider client not found") - suite.path.EndpointA.ClientID = providerClient - // - client config - providerUnbondingPeriod := suite.providerChain.App.(*appProvider.App).GetStakingKeeper().UnbondingTime(suite.providerChain.GetContext()) - suite.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = providerUnbondingPeriod - suite.path.EndpointB.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = providerUnbondingPeriod / utils.TrustingPeriodFraction - consumerUnbondingPeriod := 
utils.ComputeConsumerUnbondingPeriod(providerUnbondingPeriod) - suite.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).UnbondingPeriod = consumerUnbondingPeriod - suite.path.EndpointA.ClientConfig.(*ibctesting.TendermintConfig).TrustingPeriod = consumerUnbondingPeriod / utils.TrustingPeriodFraction - // - channel config - suite.path.EndpointA.ChannelConfig.PortID = ccv.ConsumerPortID - suite.path.EndpointB.ChannelConfig.PortID = ccv.ProviderPortID - suite.path.EndpointA.ChannelConfig.Version = ccv.Version - suite.path.EndpointB.ChannelConfig.Version = ccv.Version - suite.path.EndpointA.ChannelConfig.Order = channeltypes.ORDERED - suite.path.EndpointB.ChannelConfig.Order = channeltypes.ORDERED - - // set chains sender account number - // TODO: to be fixed in #151 - err = suite.path.EndpointB.Chain.SenderAccount.SetAccountNumber(6) - suite.Require().NoError(err) - err = suite.path.EndpointA.Chain.SenderAccount.SetAccountNumber(1) - suite.Require().NoError(err) - - suite.ctx = suite.consumerChain.GetContext() -} - -func (suite *ConsumerKeeperTestSuite) SetupCCVChannel() { - suite.coordinator.CreateConnections(suite.path) - suite.coordinator.CreateChannels(suite.path) -} diff --git a/tests/e2e/slashing_test.go b/tests/e2e/slashing_test.go index bf861d99af..1e7e5fd6af 100644 --- a/tests/e2e/slashing_test.go +++ b/tests/e2e/slashing_test.go @@ -23,8 +23,9 @@ import ( ) // TestSendDowntimePacket tests consumer initiated slashing -func (s *ProviderTestSuite) TestSendSlashPacketDowntime() { +func (s *CCVTestSuite) TestSendSlashPacketDowntime() { s.SetupCCVChannel() + s.SetupTransferChannel() validatorsPerChain := len(s.consumerChain.Vals.Validators) providerStakingKeeper := s.providerChain.App.(*appProvider.App).StakingKeeper @@ -143,8 +144,9 @@ func (s *ProviderTestSuite) TestSendSlashPacketDowntime() { s.Require().NoError(err) } -func (s *ProviderTestSuite) TestSendSlashPacketDoubleSign() { +func (s *CCVTestSuite) TestSendSlashPacketDoubleSign() { s.SetupCCVChannel() + s.SetupTransferChannel() validatorsPerChain := len(s.consumerChain.Vals.Validators) providerStakingKeeper := s.providerChain.App.(*appProvider.App).StakingKeeper @@ -253,11 +255,12 @@ func (s *ProviderTestSuite) TestSendSlashPacketDoubleSign() { s.Require().True(valSignInfo.JailedUntil.Equal(evidencetypes.DoubleSignJailEndTime)) } -func (s *ProviderTestSuite) TestSlashPacketAcknowldgement() { +func (s *CCVTestSuite) TestSlashPacketAcknowldgement() { providerKeeper := s.providerChain.App.(*appProvider.App).ProviderKeeper consumerKeeper := s.consumerChain.App.(*appConsumer.App).ConsumerKeeper s.SetupCCVChannel() + s.SetupTransferChannel() packet := channeltypes.NewPacket([]byte{}, 1, ccv.ConsumerPortID, s.path.EndpointA.ChannelID, ccv.ProviderPortID, s.path.EndpointB.ChannelID, clienttypes.Height{}, 0) @@ -273,7 +276,7 @@ func (s *ProviderTestSuite) TestSlashPacketAcknowldgement() { } // TestHandleSlashPacketDoubleSigning tests the handling of a double-signing related slash packet, with e2e tests -func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketDoubleSigning() { +func (suite *CCVTestSuite) TestHandleSlashPacketDoubleSigning() { providerKeeper := suite.providerChain.App.(*appProvider.App).ProviderKeeper providerSlashingKeeper := suite.providerChain.App.(*appProvider.App).SlashingKeeper providerStakingKeeper := suite.providerChain.App.(*appProvider.App).StakingKeeper @@ -282,21 +285,21 @@ func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketDoubleSigning() { consAddr := sdk.ConsAddress(tmVal.Address) // check 
that validator bonded status - validator, found := providerStakingKeeper.GetValidatorByConsAddr(suite.ctx, consAddr) + validator, found := providerStakingKeeper.GetValidatorByConsAddr(suite.providerCtx(), consAddr) suite.Require().True(found) suite.Require().Equal(stakingtypes.Bonded, validator.GetStatus()) // set init VSC id for chain0 - providerKeeper.SetInitChainHeight(suite.ctx, suite.consumerChain.ChainID, uint64(suite.ctx.BlockHeight())) + providerKeeper.SetInitChainHeight(suite.providerCtx(), suite.consumerChain.ChainID, uint64(suite.providerCtx().BlockHeight())) // set validator signing-info providerSlashingKeeper.SetValidatorSigningInfo( - suite.ctx, + suite.providerCtx(), consAddr, slashingtypes.ValidatorSigningInfo{Address: consAddr.String()}, ) - _, err := providerKeeper.HandleSlashPacket(suite.ctx, suite.consumerChain.ChainID, + _, err := providerKeeper.HandleSlashPacket(suite.providerCtx(), suite.consumerChain.ChainID, ccv.NewSlashPacketData( abci.Validator{Address: tmVal.Address, Power: 0}, uint64(0), @@ -306,35 +309,35 @@ func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketDoubleSigning() { suite.NoError(err) // verify that validator is jailed in the staking and slashing mdodules' states - suite.Require().True(providerStakingKeeper.IsValidatorJailed(suite.ctx, consAddr)) + suite.Require().True(providerStakingKeeper.IsValidatorJailed(suite.providerCtx(), consAddr)) - signingInfo, _ := providerSlashingKeeper.GetValidatorSigningInfo(suite.ctx, consAddr) + signingInfo, _ := providerSlashingKeeper.GetValidatorSigningInfo(suite.providerCtx(), consAddr) suite.Require().True(signingInfo.JailedUntil.Equal(evidencetypes.DoubleSignJailEndTime)) suite.Require().True(signingInfo.Tombstoned) } // TestHandleSlashPacketErrors tests errors for the HandleSlashPacket method in an e2e testing setting -func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketErrors() { +func (suite *CCVTestSuite) TestHandleSlashPacketErrors() { providerStakingKeeper := suite.providerChain.App.(*appProvider.App).StakingKeeper ProviderKeeper := suite.providerChain.App.(*appProvider.App).ProviderKeeper providerSlashingKeeper := suite.providerChain.App.(*appProvider.App).SlashingKeeper consumerChainID := suite.consumerChain.ChainID // sync contexts block height - suite.ctx = suite.providerChain.GetContext() + ctx := suite.providerCtx() // expect an error if initial block height isn't set for consumer chain - _, err := ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, ccv.SlashPacketData{}) + _, err := ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, ccv.SlashPacketData{}) suite.Require().Error(err, "slash validator with invalid infraction height") // save VSC ID - vID := ProviderKeeper.GetValidatorSetUpdateId(suite.ctx) + vID := ProviderKeeper.GetValidatorSetUpdateId(ctx) // remove block height for current VSC ID - ProviderKeeper.DeleteValsetUpdateBlockHeight(suite.ctx, vID) + ProviderKeeper.DeleteValsetUpdateBlockHeight(ctx, vID) // expect an error if block height mapping VSC ID is zero - _, err = ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, ccv.SlashPacketData{ValsetUpdateId: vID}) + _, err = ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, ccv.SlashPacketData{ValsetUpdateId: vID}) suite.Require().Error(err, "slash with height mapping to zero") // construct slashing packet with non existing validator @@ -344,34 +347,34 @@ func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketErrors() { ) // Set initial block height for consumer chain - 
ProviderKeeper.SetInitChainHeight(suite.ctx, consumerChainID, uint64(suite.ctx.BlockHeight())) + ProviderKeeper.SetInitChainHeight(ctx, consumerChainID, uint64(ctx.BlockHeight())) // expect the slash to not succeed if validator doesn't exist - success, err := ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, slashingPkt) + success, err := ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, slashingPkt) suite.Require().NoError(err, "slashing an unknown validator should not result in error") suite.Require().False(success, "did slash unknown validator") // jail an existing validator val := suite.providerChain.Vals.Validators[0] consAddr := sdk.ConsAddress(val.Address) - providerStakingKeeper.Jail(suite.ctx, consAddr) + providerStakingKeeper.Jail(ctx, consAddr) // commit block to set VSC ID suite.coordinator.CommitBlock(suite.providerChain) // Update suite.ctx bc CommitBlock updates only providerChain's current header block height - suite.ctx = suite.providerChain.GetContext() - suite.Require().NotZero(ProviderKeeper.GetValsetUpdateBlockHeight(suite.ctx, vID)) + ctx = suite.providerChain.GetContext() + suite.Require().NotZero(ProviderKeeper.GetValsetUpdateBlockHeight(ctx, vID)) // create validator signing info - valInfo := slashingtypes.NewValidatorSigningInfo(sdk.ConsAddress(val.Address), suite.ctx.BlockHeight(), - suite.ctx.BlockHeight()-1, time.Time{}.UTC(), false, int64(0)) - providerSlashingKeeper.SetValidatorSigningInfo(suite.ctx, sdk.ConsAddress(val.Address), valInfo) + valInfo := slashingtypes.NewValidatorSigningInfo(sdk.ConsAddress(val.Address), ctx.BlockHeight(), + ctx.BlockHeight()-1, time.Time{}.UTC(), false, int64(0)) + providerSlashingKeeper.SetValidatorSigningInfo(ctx, sdk.ConsAddress(val.Address), valInfo) // update validator address and VSC ID slashingPkt.Validator.Address = val.Address slashingPkt.ValsetUpdateId = vID // expect to slash and jail validator - _, err = ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, slashingPkt) + _, err = ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, slashingPkt) suite.Require().NoError(err, "did slash jail validator") // expect error when infraction type in unspecified @@ -380,25 +383,25 @@ func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketErrors() { slashingPkt.Infraction = stakingtypes.InfractionEmpty valInfo.Address = sdk.ConsAddress(tmAddr).String() - providerSlashingKeeper.SetValidatorSigningInfo(suite.ctx, sdk.ConsAddress(tmAddr), valInfo) + providerSlashingKeeper.SetValidatorSigningInfo(ctx, sdk.ConsAddress(tmAddr), valInfo) - _, err = ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, slashingPkt) + _, err = ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, slashingPkt) suite.Require().EqualError(err, fmt.Sprintf("invalid infraction type: %v", stakingtypes.InfractionEmpty)) // expect to slash jail validator slashingPkt.Infraction = stakingtypes.DoubleSign - _, err = ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, slashingPkt) + _, err = ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, slashingPkt) suite.Require().NoError(err) // expect the slash to not succeed when validator is tombstoned - success, _ = ProviderKeeper.HandleSlashPacket(suite.ctx, consumerChainID, slashingPkt) + success, _ = ProviderKeeper.HandleSlashPacket(ctx, consumerChainID, slashingPkt) suite.Require().False(success) } // TestHandleSlashPacketDistribution tests the slashing of an undelegation balance // by varying the slash packet VSC ID mapping to infraction heights // lesser, equal or 
greater than the undelegation entry creation height -func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketDistribution() { +func (suite *CCVTestSuite) TestHandleSlashPacketDistribution() { providerStakingKeeper := suite.providerChain.App.(*appProvider.App).StakingKeeper providerKeeper := suite.providerChain.App.(*appProvider.App).ProviderKeeper @@ -419,20 +422,20 @@ func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketDistribution() { // setup the test with a delegation, a no-op and an undelegation setupOperations := []struct { - fn func(suite *ProviderKeeperTestSuite) error + fn func(suite *CCVTestSuite) error }{ { - func(suite *ProviderKeeperTestSuite) error { + func(suite *CCVTestSuite) error { testShares, err = providerStakingKeeper.Delegate(suite.providerChain.GetContext(), delAddr, bondAmt, stakingtypes.Unbonded, stakingtypes.Validator(validator), true) return err }, }, { - func(suite *ProviderKeeperTestSuite) error { + func(suite *CCVTestSuite) error { return nil }, }, { // undelegate a quarter of the new shares created - func(suite *ProviderKeeperTestSuite) error { + func(suite *CCVTestSuite) error { _, err = providerStakingKeeper.Undelegate(suite.providerChain.GetContext(), delAddr, valAddr, testShares.QuoInt64(4)) return err }, @@ -499,81 +502,84 @@ func (suite *ProviderKeeperTestSuite) TestHandleSlashPacketDistribution() { // TestValidatorDowntime tests if a slash packet is sent // and if the outstanding slashing flag is switched // when a validator has downtime on the slashing module -func (suite *ConsumerKeeperTestSuite) TestValidatorDowntime() { +func (suite *CCVTestSuite) TestValidatorDowntime() { // initial setup suite.SetupCCVChannel() suite.SendEmptyVSCPacket() // sync suite context after CCV channel is established - suite.ctx = suite.consumerChain.GetContext() + ctx := suite.consumerCtx() app := suite.consumerChain.App.(*appConsumer.App) channelID := suite.path.EndpointA.ChannelID // pick a cross-chain validator - vals := app.ConsumerKeeper.GetAllCCValidator(suite.ctx) + vals := app.ConsumerKeeper.GetAllCCValidator(ctx) consAddr := sdk.ConsAddress(vals[0].Address) // save next sequence before sending a slash packet - seq, ok := app.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(suite.ctx, ccv.ConsumerPortID, channelID) + seq, ok := app.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctx, ccv.ConsumerPortID, channelID) suite.Require().True(ok) // Sign 100 blocks valPower := int64(1) - height, signedBlocksWindow := int64(0), app.SlashingKeeper.SignedBlocksWindow(suite.ctx) + height, signedBlocksWindow := int64(0), app.SlashingKeeper.SignedBlocksWindow(ctx) for ; height < signedBlocksWindow; height++ { - suite.ctx = suite.ctx.WithBlockHeight(height) - app.SlashingKeeper.HandleValidatorSignature(suite.ctx, vals[0].Address, valPower, true) + ctx = ctx.WithBlockHeight(height) + app.SlashingKeeper.HandleValidatorSignature(ctx, vals[0].Address, valPower, true) } - missedBlockThreshold := (2 * signedBlocksWindow) - app.SlashingKeeper.MinSignedPerWindow(suite.ctx) + missedBlockThreshold := (2 * signedBlocksWindow) - app.SlashingKeeper.MinSignedPerWindow(ctx) + ctx = suite.consumerCtx() // construct slash packet to be sent and get its commit packetData := ccv.NewSlashPacketData( abci.Validator{Address: vals[0].Address, Power: valPower}, // get the VSC ID mapping the infraction height - app.ConsumerKeeper.GetHeightValsetUpdateID(suite.ctx, uint64(missedBlockThreshold-sdk.ValidatorUpdateDelay-1)), + app.ConsumerKeeper.GetHeightValsetUpdateID(ctx, 
uint64(missedBlockThreshold-sdk.ValidatorUpdateDelay-1)), stakingtypes.Downtime, ) - expCommit := suite.commitSlashPacket(suite.ctx, packetData) + expCommit := suite.commitSlashPacket(ctx, packetData) // Miss 50 blocks and expect a slash packet to be sent for ; height <= missedBlockThreshold; height++ { - suite.ctx = suite.ctx.WithBlockHeight(height) - app.SlashingKeeper.HandleValidatorSignature(suite.ctx, vals[0].Address, valPower, false) + ctx = ctx.WithBlockHeight(height) + app.SlashingKeeper.HandleValidatorSignature(ctx, vals[0].Address, valPower, false) } + ctx = suite.consumerCtx() + + // check validator signing info - res, _ := app.SlashingKeeper.GetValidatorSigningInfo(suite.ctx, consAddr) + res, _ := app.SlashingKeeper.GetValidatorSigningInfo(ctx, consAddr) // expect increased jail time - suite.Require().True(res.JailedUntil.Equal(suite.ctx.BlockTime().Add(app.SlashingKeeper.DowntimeJailDuration(suite.ctx))), "did not update validator jailed until signing info") + suite.Require().True(res.JailedUntil.Equal(ctx.BlockTime().Add(app.SlashingKeeper.DowntimeJailDuration(ctx))), "did not update validator jailed until signing info") // expect missed block counters reset suite.Require().Zero(res.MissedBlocksCounter, "did not reset validator missed block counter") suite.Require().Zero(res.IndexOffset) - app.SlashingKeeper.IterateValidatorMissedBlockBitArray(suite.ctx, consAddr, func(_ int64, missed bool) bool { + app.SlashingKeeper.IterateValidatorMissedBlockBitArray(ctx, consAddr, func(_ int64, missed bool) bool { suite.Require().True(missed) return false }) // verify that the slash packet was sent - gotCommit := app.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.ctx, ccv.ConsumerPortID, channelID, seq) + gotCommit := app.IBCKeeper.ChannelKeeper.GetPacketCommitment(ctx, ccv.ConsumerPortID, channelID, seq) suite.Require().NotNil(gotCommit, "did not find slash packet commitment") suite.Require().EqualValues(expCommit, gotCommit, "invalid slash packet commitment") // verify that the slash packet was sent - suite.Require().True(app.ConsumerKeeper.OutstandingDowntime(suite.ctx, consAddr)) + suite.Require().True(app.ConsumerKeeper.OutstandingDowntime(ctx, consAddr)) // check that the outstanding slashing flag prevents the jailed validator from being slashed again while it keeps missing blocks for ; height < missedBlockThreshold+signedBlocksWindow; height++ { - suite.ctx = suite.ctx.WithBlockHeight(height) - app.SlashingKeeper.HandleValidatorSignature(suite.ctx, vals[0].Address, valPower, false) + ctx = ctx.WithBlockHeight(height) + app.SlashingKeeper.HandleValidatorSignature(ctx, vals[0].Address, valPower, false) } - res, _ = app.SlashingKeeper.GetValidatorSigningInfo(suite.ctx, consAddr) + res, _ = app.SlashingKeeper.GetValidatorSigningInfo(ctx, consAddr) suite.Require().Zero(res.MissedBlocksCounter, "did not reset validator missed block counter") suite.Require().Zero(res.IndexOffset) - app.SlashingKeeper.IterateValidatorMissedBlockBitArray(suite.ctx, consAddr, func(_ int64, missed bool) bool { + app.SlashingKeeper.IterateValidatorMissedBlockBitArray(ctx, consAddr, func(_ int64, missed bool) bool { suite.Require().True(missed, "did not reset validator missed block bit array") return false }) @@ -581,13 +587,13 @@ func (suite *ConsumerKeeperTestSuite) TestValidatorDowntime() { // TestValidatorDoubleSigning tests if a slash packet is sent // when double-signing evidence is handled by the evidence module -func (suite *ConsumerKeeperTestSuite) TestValidatorDoubleSigning() { +func (suite *CCVTestSuite)
TestValidatorDoubleSigning() { // initial setup suite.SetupCCVChannel() suite.SendEmptyVSCPacket() // sync suite context after CCV channel is established - suite.ctx = suite.consumerChain.GetContext() + ctx := suite.consumerCtx() app := suite.consumerChain.App.(*appConsumer.App) channelID := suite.path.EndpointA.ChannelID @@ -598,7 +604,7 @@ func (suite *ConsumerKeeperTestSuite) TestValidatorDoubleSigning() { consAddr := sdk.ConsAddress(pubkey.Address()) // set an arbitrary infraction height - infractionHeight := suite.ctx.BlockHeight() - 1 + infractionHeight := ctx.BlockHeight() - 1 power := int64(100) // create evidence @@ -610,36 +616,36 @@ func (suite *ConsumerKeeperTestSuite) TestValidatorDoubleSigning() { } // add validator signing-info to the store - app.SlashingKeeper.SetValidatorSigningInfo(suite.ctx, consAddr, slashingtypes.ValidatorSigningInfo{ + app.SlashingKeeper.SetValidatorSigningInfo(ctx, consAddr, slashingtypes.ValidatorSigningInfo{ Address: consAddr.String(), Tombstoned: false, }) // save next sequence before sending a slash packet - seq, ok := app.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(suite.ctx, ccv.ConsumerPortID, channelID) + seq, ok := app.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctx, ccv.ConsumerPortID, channelID) suite.Require().True(ok) // construct slash packet data and get the expected commit hash packetData := ccv.NewSlashPacketData( abci.Validator{Address: consAddr.Bytes(), Power: power}, // get VSC ID mapping to the infraction height with the TM delay subtracted - app.ConsumerKeeper.GetHeightValsetUpdateID(suite.ctx, uint64(infractionHeight-sdk.ValidatorUpdateDelay)), + app.ConsumerKeeper.GetHeightValsetUpdateID(ctx, uint64(infractionHeight-sdk.ValidatorUpdateDelay)), stakingtypes.DoubleSign, ) - expCommit := suite.commitSlashPacket(suite.ctx, packetData) + expCommit := suite.commitSlashPacket(ctx, packetData) // expect to send slash packet when handling double-sign evidence - app.EvidenceKeeper.HandleEquivocationEvidence(suite.ctx, e) + app.EvidenceKeeper.HandleEquivocationEvidence(ctx, e) // check that slash packet is sent - gotCommit := app.IBCKeeper.ChannelKeeper.GetPacketCommitment(suite.ctx, ccv.ConsumerPortID, channelID, seq) + gotCommit := app.IBCKeeper.ChannelKeeper.GetPacketCommitment(ctx, ccv.ConsumerPortID, channelID, seq) suite.NotNil(gotCommit) suite.Require().EqualValues(expCommit, gotCommit) } // TestSendSlashPacket tests the functionality of SendSlashPacket and asserts state changes related to that method -func (suite *ConsumerKeeperTestSuite) TestSendSlashPacket() { +func (suite *CCVTestSuite) TestSendSlashPacket() { suite.SetupCCVChannel() app := suite.consumerChain.App.(*appConsumer.App) @@ -678,7 +684,7 @@ func (suite *ConsumerKeeperTestSuite) TestSendSlashPacket() { // verify that all requests are stored requests := app.ConsumerKeeper.GetPendingSlashRequests(ctx) - suite.Require().Len(requests, 16) + suite.Require().Len(requests.GetRequests(), 16) // save consumer next sequence seq, _ := app.GetIBCKeeper().ChannelKeeper.GetNextSequenceSend(ctx, ccv.ConsumerPortID, channelID) @@ -699,7 +705,7 @@ func (suite *ConsumerKeeperTestSuite) TestSendSlashPacket() { // check that outstanding downtime flags // are all set to true for validators slashed for downtime requests - for _, r := range requests { + for _, r := range requests.GetRequests() { downtime := r.Infraction == stakingtypes.Downtime if downtime { consAddr := sdk.ConsAddress(r.Packet.Validator.Address) @@ -709,12 +715,12 @@ func (suite *ConsumerKeeperTestSuite)
TestSendSlashPacket() { // check that pending slash requests get cleared after being sent requests = app.ConsumerKeeper.GetPendingSlashRequests(ctx) - suite.Require().Len(requests, 0) + suite.Require().Len(requests.GetRequests(), 0) // check that slash requests aren't stored when channel is established app.ConsumerKeeper.SendSlashPacket(ctx, abci.Validator{}, 0, stakingtypes.Downtime) app.ConsumerKeeper.SendSlashPacket(ctx, abci.Validator{}, 0, stakingtypes.DoubleSign) requests = app.ConsumerKeeper.GetPendingSlashRequests(ctx) - suite.Require().Len(requests, 0) + suite.Require().Len(requests.GetRequests(), 0) } diff --git a/tests/e2e/stop_consumer_test.go b/tests/e2e/stop_consumer_test.go index e490d8c014..767b8d793d 100644 --- a/tests/e2e/stop_consumer_test.go +++ b/tests/e2e/stop_consumer_test.go @@ -1,19 +1,17 @@ package e2e_test import ( - "time" - sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" - clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" channeltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types" + appConsumer "github.com/cosmos/interchain-security/app/consumer" appProvider "github.com/cosmos/interchain-security/app/provider" - providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" ccv "github.com/cosmos/interchain-security/x/ccv/types" abci "github.com/tendermint/tendermint/abci/types" ) -func (s *ProviderTestSuite) TestStopConsumerChain() { +// Tests the functionality of stopping a consumer chain at a higher level than unit tests +func (s *CCVTestSuite) TestStopConsumerChain() { // default consumer chain ID consumerChainID := s.consumerChain.ChainID @@ -45,22 +43,23 @@ func (s *ProviderTestSuite) TestStopConsumerChain() { // - undelegate the shares in four consecutive blocks evenly; create UnbondigOp and UnbondingOpIndex entries for the consumer chain ID // - set SlashAck and LockUnbondingOnTimeout states for the consumer chain ID setupOperations := []struct { - fn func(suite *ProviderTestSuite) error + fn func(suite *CCVTestSuite) error }{ { - func(suite *ProviderTestSuite) error { + func(suite *CCVTestSuite) error { suite.SetupCCVChannel() + suite.SetupTransferChannel() return nil }, }, { - func(suite *ProviderTestSuite) error { + func(suite *CCVTestSuite) error { testShares, err = s.providerChain.App.(*appProvider.App).StakingKeeper.Delegate(s.providerCtx(), delAddr, bondAmt, stakingtypes.Unbonded, stakingtypes.Validator(validator), true) return err }, }, { - func(suite *ProviderTestSuite) error { + func(suite *CCVTestSuite) error { for i := 0; i < ubdOpsNum; i++ { // undelegate one quarter of the shares _, err := s.providerChain.App.(*appProvider.App).StakingKeeper.Undelegate(s.providerCtx(), delAddr, valAddr, testShares.QuoInt64(int64(ubdOpsNum))) @@ -74,9 +73,10 @@ func (s *ProviderTestSuite) TestStopConsumerChain() { }, }, { - func(suite *ProviderTestSuite) error { + func(suite *CCVTestSuite) error { s.providerChain.App.(*appProvider.App).ProviderKeeper.SetSlashAcks(s.providerCtx(), consumerChainID, []string{"validator-1", "validator-2", "validator-3"}) s.providerChain.App.(*appProvider.App).ProviderKeeper.SetLockUnbondingOnTimeout(s.providerCtx(), consumerChainID) + s.providerChain.App.(*appProvider.App).ProviderKeeper.AppendPendingVSC(s.providerCtx(), consumerChainID, ccv.ValidatorSetChangePacketData{ValsetUpdateId: 1}) return nil }, }, @@ -95,109 +95,11 @@ func (s *ProviderTestSuite) TestStopConsumerChain() { s.checkConsumerChainIsRemoved(consumerChainID, 
false) } -func (s *ProviderTestSuite) TestStopConsumerChainProposal() { - var ( - ctx sdk.Context - proposal *providertypes.StopConsumerChainProposal - ok bool - ) - - chainID := s.consumerChain.ChainID - - testCases := []struct { - name string - malleate func(*ProviderTestSuite) - expPass bool - stopReached bool - }{ - { - "valid stop consumer chain proposal: stop time reached", func(suite *ProviderTestSuite) { - - // ctx blocktime is after proposal's stop time - ctx = s.providerCtx().WithBlockTime(time.Now().Add(time.Hour)) - content, err := providertypes.NewStopConsumerChainProposal("title", "description", chainID, time.Now()) - s.Require().NoError(err) - proposal, ok = content.(*providertypes.StopConsumerChainProposal) - s.Require().True(ok) - }, true, true, - }, - { - "valid proposal: stop time has not yet been reached", func(suite *ProviderTestSuite) { - - // ctx blocktime is before proposal's stop time - ctx = s.providerCtx().WithBlockTime(time.Now()) - content, err := providertypes.NewStopConsumerChainProposal("title", "description", chainID, time.Now().Add(time.Hour)) - s.Require().NoError(err) - proposal, ok = content.(*providertypes.StopConsumerChainProposal) - s.Require().True(ok) - }, true, false, - }, - { - "valid proposal: fail due to an invalid unbonding index", func(suite *ProviderTestSuite) { - - // ctx blocktime is after proposal's stop time - ctx = s.providerCtx().WithBlockTime(time.Now().Add(time.Hour)) - - // set invalid unbonding op index - s.providerChain.App.(*appProvider.App).ProviderKeeper.SetUnbondingOpIndex(ctx, chainID, 0, []uint64{0}) - - content, err := providertypes.NewStopConsumerChainProposal("title", "description", chainID, time.Now()) - s.Require().NoError(err) - proposal, ok = content.(*providertypes.StopConsumerChainProposal) - s.Require().True(ok) - }, false, true, - }, - } - - for _, tc := range testCases { - tc := tc - - s.Run(tc.name, func() { - s.SetupTest() - s.SetupCCVChannel() - - tc.malleate(s) - - err := s.providerChain.App.(*appProvider.App).ProviderKeeper.StopConsumerChainProposal(ctx, proposal) - if tc.expPass { - s.Require().NoError(err, "error returned on valid case") - if tc.stopReached { - // check that the pending stop consumer chain proposal is deleted - found := s.providerChain.App.(*appProvider.App).ProviderKeeper.GetPendingStopProposal(ctx, chainID, proposal.StopTime) - s.Require().False(found, "pending stop consumer proposal wasn't deleted") - - // check that the consumer chain is removed - s.checkConsumerChainIsRemoved(chainID, false) - - } else { - found := s.providerChain.App.(*appProvider.App).ProviderKeeper.GetPendingStopProposal(ctx, chainID, proposal.StopTime) - s.Require().True(found, "pending stop consumer was not found for chain ID %s", chainID) - - // check that the consumer chain client exists - _, found = s.providerChain.App.(*appProvider.App).ProviderKeeper.GetConsumerClientId(s.providerCtx(), chainID) - s.Require().True(found) - - // check that the chainToChannel and channelToChain exist for the consumer chain ID - _, found = s.providerChain.App.(*appProvider.App).ProviderKeeper.GetChainToChannel(s.providerCtx(), chainID) - s.Require().True(found) - - _, found = s.providerChain.App.(*appProvider.App).ProviderKeeper.GetChannelToChain(s.providerCtx(), s.path.EndpointB.ChannelID) - s.Require().True(found) - - // check that channel is in OPEN state - s.Require().Equal(channeltypes.OPEN, s.path.EndpointB.GetChannel().State) - } - } else { - s.Require().Error(err, "did not return error on invalid case") - } - }) - } -} 
- // TODO Simon: implement OnChanCloseConfirm in IBC-GO testing to close the consumer chain's channel end -func (s *ProviderTestSuite) TestStopConsumerOnChannelClosed() { +func (s *CCVTestSuite) TestStopConsumerOnChannelClosed() { // init the CCV channel states s.SetupCCVChannel() + s.SetupTransferChannel() s.SendEmptyVSCPacket() // stop the consumer chain @@ -222,7 +124,7 @@ func (s *ProviderTestSuite) TestStopConsumerOnChannelClosed() { // s.Require().False(found) } -func (s *ProviderTestSuite) checkConsumerChainIsRemoved(chainID string, lockUbd bool) { +func (s *CCVTestSuite) checkConsumerChainIsRemoved(chainID string, lockUbd bool) { channelID := s.path.EndpointB.ChannelID providerKeeper := s.providerChain.App.(*appProvider.App).ProviderKeeper @@ -250,8 +152,10 @@ func (s *ProviderTestSuite) checkConsumerChainIsRemoved(chainID string, lockUbd } // verify consumer chain's states are removed + _, found := providerKeeper.GetConsumerGenesis(s.providerCtx(), chainID) + s.Require().False(found) s.Require().False(providerKeeper.GetLockUnbondingOnTimeout(s.providerCtx(), chainID)) - _, found := providerKeeper.GetConsumerClientId(s.providerCtx(), chainID) + _, found = providerKeeper.GetConsumerClientId(s.providerCtx(), chainID) s.Require().False(found) _, found = providerKeeper.GetChainToChannel(s.providerCtx(), chainID) @@ -262,36 +166,31 @@ func (s *ProviderTestSuite) checkConsumerChainIsRemoved(chainID string, lockUbd s.Require().Nil(providerKeeper.GetSlashAcks(s.providerCtx(), chainID)) s.Require().Zero(providerKeeper.GetInitChainHeight(s.providerCtx(), chainID)) - // TODO Simon: check that pendingVSCPacket are emptied - once - // https://github.com/cosmos/interchain-security/issues/27 is implemented + s.Require().Nil(providerKeeper.GetPendingVSCs(s.providerCtx(), chainID)) } -// TODO Simon: duplicated from consumer/keeper_test.go; figure out how it can be refactored -// SendEmptyVSCPacket sends a VSC packet without any changes -// to ensure that the CCV channel gets established -func (s *ProviderTestSuite) SendEmptyVSCPacket() { - providerKeeper := s.providerChain.App.(*appProvider.App).ProviderKeeper - - oldBlockTime := s.providerChain.GetContext().BlockTime() - timeout := uint64(ccv.GetTimeoutTimestamp(oldBlockTime).UnixNano()) - - valUpdateID := providerKeeper.GetValidatorSetUpdateId(s.providerChain.GetContext()) - - pd := ccv.NewValidatorSetChangePacketData( - []abci.ValidatorUpdate{}, - valUpdateID, - nil, - ) - - seq, ok := s.providerChain.App.(*appProvider.App).GetIBCKeeper().ChannelKeeper.GetNextSequenceSend( - s.providerChain.GetContext(), ccv.ProviderPortID, s.path.EndpointB.ChannelID) - s.Require().True(ok) - - packet := channeltypes.NewPacket(pd.GetBytes(), seq, ccv.ProviderPortID, s.path.EndpointB.ChannelID, - ccv.ConsumerPortID, s.path.EndpointA.ChannelID, clienttypes.Height{}, timeout) - - err := s.path.EndpointB.SendPacket(packet) - s.Require().NoError(err) - err = s.path.EndpointA.RecvPacket(packet) - s.Require().NoError(err) +// TestProviderChannelClosed checks that a consumer chain panics +// when the provider channel was established and then closed +func (suite *CCVTestSuite) TestProviderChannelClosed() { + + suite.SetupCCVChannel() + // establish provider channel with a first VSC packet + suite.SendEmptyVSCPacket() + + channelID, found := suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.GetProviderChannel(suite.consumerChain.GetContext()) + suite.Require().True(found) + + // close provider channel + err := 
suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.ChanCloseInit(suite.consumerChain.GetContext(), ccv.ConsumerPortID, channelID) + suite.Require().NoError(err) + suite.Require().True(suite.consumerChain.App.(*appConsumer.App).ConsumerKeeper.IsChannelClosed(suite.consumerChain.GetContext(), channelID)) + + // assert that the begin blocker panics + defer func() { + if r := recover(); r != nil { + return + } + suite.Require().Fail("Begin blocker did not panic with a closed channel") + }() + suite.consumerChain.App.(*appConsumer.App).BeginBlocker(suite.consumerChain.GetContext(), abci.RequestBeginBlock{}) } diff --git a/tests/e2e/unbonding_test.go b/tests/e2e/unbonding_test.go index db3eff480c..baeb96eb22 100644 --- a/tests/e2e/unbonding_test.go +++ b/tests/e2e/unbonding_test.go @@ -11,8 +11,9 @@ import ( ) // TestUndelegationProviderFirst checks that an unbonding operation completes // when the unbonding period elapses first on the provider chain -func (s *ProviderTestSuite) TestUndelegationProviderFirst() { +func (s *CCVTestSuite) TestUndelegationProviderFirst() { s.SetupCCVChannel() + s.SetupTransferChannel() // delegate bondAmt and undelegate 1/2 of it bondAmt := sdk.NewInt(10000000) @@ -54,8 +55,9 @@ func (s *ProviderTestSuite) TestUndelegationProviderFirst() { // TestUndelegationConsumerFirst checks that an unbonding operation completes // when the unbonding period elapses first on the consumer chain -func (s *ProviderTestSuite) TestUndelegationConsumerFirst() { +func (s *CCVTestSuite) TestUndelegationConsumerFirst() { s.SetupCCVChannel() + s.SetupTransferChannel() // delegate bondAmt and undelegate 1/2 of it bondAmt := sdk.NewInt(10000000) @@ -95,8 +97,9 @@ func (s *ProviderTestSuite) TestUndelegationConsumerFirst() { // TestUndelegationNoValsetChange checks that an unbonding operation completes // even when the validator set is not changed -func (s *ProviderTestSuite) TestUndelegationNoValsetChange() { +func (s *CCVTestSuite) TestUndelegationNoValsetChange() { s.SetupCCVChannel() + s.SetupTransferChannel() // delegate bondAmt and undelegate all of it bondAmt := sdk.NewInt(10000000) @@ -137,7 +140,7 @@ func (s *ProviderTestSuite) TestUndelegationNoValsetChange() { // TestUndelegationDuringInit checks that before the CCV channel is established // - no undelegations can complete, even if the provider unbonding period elapses // - all the VSC packets are stored in state as pending -func (s *ProviderTestSuite) TestUndelegationDuringInit() { +func (s *CCVTestSuite) TestUndelegationDuringInit() { // delegate bondAmt and undelegate 1/2 of it bondAmt := sdk.NewInt(10000000) delAddr := s.providerChain.SenderAccount.GetAddress() @@ -175,6 +178,7 @@ func (s *ProviderTestSuite) TestUndelegationDuringInit() { // complete CCV channel setup s.SetupCCVChannel() + s.SetupTransferChannel() // relay VSC packets from provider to consumer relayAllCommittedPackets(s, s.providerChain, s.path, ccv.ProviderPortID, s.path.EndpointB.ChannelID, 2) @@ -200,7 +204,7 @@ func (s *ProviderTestSuite) TestUndelegationDuringInit() { // Check unbonding ops on both sides // Advance time so that provider's unbonding op completes // Check that unbonding has completed in provider staking -func (s *ProviderTestSuite) TestUnbondingNoConsumer() { +func (s *CCVTestSuite) TestUnbondingNoConsumer() { // remove the consumer chain, which was already registered during setup s.providerChain.App.(*appProvider.App).ProviderKeeper.DeleteConsumerClientId(s.providerCtx(), s.consumerChain.ChainID) @@ -231,7 +235,7 @@ func (s
*ProviderTestSuite) TestUnbondingNoConsumer() { // TestRedelegationNoConsumer tests a redelegate transaction // submitted on a provider chain with no consumers -func (s *ProviderTestSuite) TestRedelegationNoConsumer() { +func (s *CCVTestSuite) TestRedelegationNoConsumer() { providerKeeper := s.providerChain.App.(*appProvider.App).ProviderKeeper stakingKeeper := s.providerChain.App.(*appProvider.App).StakingKeeper @@ -275,8 +279,9 @@ func (s *ProviderTestSuite) TestRedelegationNoConsumer() { // TestRedelegationWithConsumer tests a redelegate transaction submitted on a provider chain // when the unbonding period elapses first on the provider chain -func (s *ProviderTestSuite) TestRedelegationProviderFirst() { +func (s *CCVTestSuite) TestRedelegationProviderFirst() { s.SetupCCVChannel() + s.SetupTransferChannel() stakingKeeper := s.providerChain.App.(*appProvider.App).StakingKeeper providerKeeper := s.providerChain.App.(*appProvider.App).ProviderKeeper diff --git a/tests/e2e/valset_update_test.go b/tests/e2e/valset_update_test.go index b68edafff9..faecf9cd1f 100644 --- a/tests/e2e/valset_update_test.go +++ b/tests/e2e/valset_update_test.go @@ -13,8 +13,9 @@ import ( ) // TestPacketRoundtrip tests a CCV packet roundtrip when tokens are bonded on provider -func (s *ProviderTestSuite) TestPacketRoundtrip() { +func (s *CCVTestSuite) TestPacketRoundtrip() { s.SetupCCVChannel() + s.SetupTransferChannel() // Bond some tokens on provider to change validator powers bondAmt := sdk.NewInt(1000000) @@ -35,7 +36,7 @@ func (s *ProviderTestSuite) TestPacketRoundtrip() { } // TestSendVSCMaturedPackets tests the behavior of SendVSCMaturedPackets and related state checks -func (suite *ConsumerKeeperTestSuite) TestSendVSCMaturedPackets() { +func (suite *CCVTestSuite) TestSendVSCMaturedPackets() { // setup CCV channel suite.SetupCCVChannel() diff --git a/tests/integration/actions.go b/tests/integration/actions.go index 955e6f481d..32578bc72d 100644 --- a/tests/integration/actions.go +++ b/tests/integration/actions.go @@ -6,11 +6,13 @@ import ( "fmt" "log" "os/exec" + "strconv" "strings" "sync" "time" - clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" + "github.com/cosmos/interchain-security/x/ccv/provider/client" ) type SendTokensAction struct { @@ -191,25 +193,13 @@ type submitConsumerProposalAction struct { initialHeight clienttypes.Height } -// TODO: import this directly from the module once it is merged -type createConsumerChainProposalJSON struct { - Title string `json:"title"` - Description string `json:"description"` - ChainId string `json:"chain_id"` - InitialHeight clienttypes.Height `json:"initial_height"` - GenesisHash []byte `json:"genesis_hash"` - BinaryHash []byte `json:"binary_hash"` - SpawnTime time.Time `json:"spawn_time"` - Deposit string `json:"deposit"` -} - -func (tr TestRun) submitConsumerProposal( +func (tr TestRun) submitConsumerAdditionProposal( action submitConsumerProposalAction, verbose bool, ) { spawnTime := tr.containerConfig.now.Add(time.Duration(action.spawnTime) * time.Millisecond) - prop := createConsumerChainProposalJSON{ - Title: "Create a chain", + prop := client.ConsumerAdditionProposalJSON{ + Title: "Propose the addition of a new chain", Description: "Gonna be a great chain", ChainId: string(tr.chainConfigs[action.consumerChain].chainId), InitialHeight: action.initialHeight, @@ -240,7 +230,7 @@ func (tr TestRun) submitConsumerProposal( //#nosec G204 -- Bypass linter warning for 
spawning subprocess with cmd arguments. bz, err = exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[action.chain].binaryName, - "tx", "gov", "submit-proposal", "create-consumer-chain", + "tx", "gov", "submit-proposal", "consumer-addition", "/temp-proposal.json", `--from`, `validator`+fmt.Sprint(action.from), @@ -257,6 +247,77 @@ func (tr TestRun) submitConsumerProposal( } } +type submitParamChangeProposalAction struct { + chain chainID + from validatorID + deposit uint + subspace string + key string + value interface{} +} + +type paramChangeProposalJSON struct { + Title string `json:"title"` + Description string `json:"description"` + Changes []paramChangeJSON `json:"changes"` + Deposit string `json:"deposit"` +} + +type paramChangeJSON struct { + Subspace string `json:"subspace"` + Key string `json:"key"` + Value interface{} `json:"value"` +} + +func (tr TestRun) submitParamChangeProposal( + action submitParamChangeProposalAction, + verbose bool, +) { + prop := paramChangeProposalJSON{ + Title: "Param change", + Description: "Changing module params", + Changes: []paramChangeJSON{{Subspace: action.subspace, Key: action.key, Value: action.value}}, + Deposit: fmt.Sprint(action.deposit) + `stake`, + } + + bz, err := json.Marshal(prop) + if err != nil { + log.Fatal(err) + } + + jsonStr := string(bz) + if strings.Contains(jsonStr, "'") { + log.Fatal("prop json contains single quote") + } + + //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. + bz, err = exec.Command("docker", "exec", tr.containerConfig.instanceName, + "/bin/bash", "-c", fmt.Sprintf(`echo '%s' > %s`, jsonStr, "/params-proposal.json")).CombinedOutput() + + if err != nil { + log.Fatal(err, "\n", string(bz)) + } + + //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. 
+ bz, err = exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[action.chain].binaryName, + + "tx", "gov", "submit-proposal", "param-change", + "/params-proposal.json", + + `--from`, `validator`+fmt.Sprint(action.from), + `--chain-id`, string(tr.chainConfigs[action.chain].chainId), + `--home`, tr.getValidatorHome(action.chain, action.from), + `--node`, tr.getValidatorNode(action.chain, action.from), + `--keyring-backend`, `test`, + `-b`, `block`, + `-y`, + ).CombinedOutput() + + if err != nil { + log.Fatal(err, "\n", string(bz)) + } +} + type voteGovProposalAction struct { chain chainID from []validatorID @@ -300,9 +361,10 @@ func (tr TestRun) voteGovProposal( } type startConsumerChainAction struct { - consumerChain chainID - providerChain chainID - validators []StartChainValidator + consumerChain chainID + providerChain chainID + genesisChanges string + validators []StartChainValidator } func (tr TestRun) startConsumerChain( @@ -329,10 +391,15 @@ func (tr TestRun) startConsumerChain( log.Fatal(err, "\n", string(bz)) } + genesisChanges := ".app_state.ccvconsumer = " + string(bz) + if action.genesisChanges != "" { + genesisChanges = genesisChanges + " | " + action.genesisChanges + } + tr.startChain(StartChainAction{ chain: action.consumerChain, validators: action.validators, - genesisChanges: ".app_state.ccvconsumer = " + string(bz), + genesisChanges: genesisChanges, skipGentx: true, }, verbose) } @@ -518,6 +585,88 @@ func (tr TestRun) addIbcChannel( } } +type transferChannelCompleteAction struct { + chainA chainID + chainB chainID + connectionA uint + portA string + portB string + order string + channelA uint + channelB uint +} + +func (tr TestRun) transferChannelComplete( + action transferChannelCompleteAction, + verbose bool, +) { + //#nosec G204 -- Bypass linter warning for spawning subprocess with chanOpenTryCmd arguments. + chanOpenTryCmd := exec.Command("docker", "exec", tr.containerConfig.instanceName, "hermes", + "tx", "chan-open-try", + "--dst-chain", string(tr.chainConfigs[action.chainB].chainId), + "--src-chain", string(tr.chainConfigs[action.chainA].chainId), + "--dst-connection", "connection-"+fmt.Sprint(action.connectionA), + "--dst-port", action.portB, + "--src-port", action.portA, + "--src-channel", "channel-"+fmt.Sprint(action.channelA), + ) + executeCommand(chanOpenTryCmd, "transferChanOpenTry") + + //#nosec G204 -- Bypass linter warning for spawning subprocess with chanOpenAckCmd arguments. + chanOpenAckCmd := exec.Command("docker", "exec", tr.containerConfig.instanceName, "hermes", + "tx", "chan-open-ack", + "--dst-chain", string(tr.chainConfigs[action.chainA].chainId), + "--src-chain", string(tr.chainConfigs[action.chainB].chainId), + "--dst-connection", "connection-"+fmt.Sprint(action.connectionA), + "--dst-port", action.portA, + "--src-port", action.portB, + "--dst-channel", "channel-"+fmt.Sprint(action.channelA), + "--src-channel", "channel-"+fmt.Sprint(action.channelB), + ) + executeCommand(chanOpenAckCmd, "transferChanOpenAck") + + //#nosec G204 -- Bypass linter warning for spawning subprocess with chanOpenConfirmCmd arguments. 
+ chanOpenConfirmCmd := exec.Command("docker", "exec", tr.containerConfig.instanceName, "hermes", + "tx", "chan-open-confirm", + "--dst-chain", string(tr.chainConfigs[action.chainB].chainId), + "--src-chain", string(tr.chainConfigs[action.chainA].chainId), + "--dst-connection", "connection-"+fmt.Sprint(action.connectionA), + "--dst-port", action.portB, + "--src-port", action.portA, + "--dst-channel", "channel-"+fmt.Sprint(action.channelB), + "--src-channel", "channel-"+fmt.Sprint(action.channelA), + ) + executeCommand(chanOpenConfirmCmd, "transferChanOpenConfirm") +} + +func executeCommand(cmd *exec.Cmd, cmdName string) { + if verbose { + fmt.Println(cmdName+" cmd:", cmd.String()) + } + + cmdReader, err := cmd.StdoutPipe() + if err != nil { + log.Fatal(err) + } + cmd.Stderr = cmd.Stdout + + if err := cmd.Start(); err != nil { + log.Fatal(err) + } + + scanner := bufio.NewScanner(cmdReader) + + for scanner.Scan() { + out := scanner.Text() + if verbose { + fmt.Println(cmdName + ": " + out) + } + } + if err := scanner.Err(); err != nil { + log.Fatal(err) + } +} + type relayPacketsAction struct { chain chainID port string @@ -545,6 +694,27 @@ func (tr TestRun) relayPackets( } } +type relayRewardPacketsToProviderAction struct { + consumerChain chainID + providerChain chainID + port string + channel uint +} + +func (tr TestRun) relayRewardPacketsToProvider( + action relayRewardPacketsToProviderAction, + verbose bool, +) { + blockPerDistribution, _ := strconv.ParseUint(strings.Trim(tr.getParam(action.consumerChain, Param{Subspace: "ccvconsumer", Key: "BlocksPerDistributionTransmission"}), "\""), 10, 64) + currentBlock := uint64(tr.getBlockHeight(action.consumerChain)) + if currentBlock <= blockPerDistribution { + tr.waitBlocks(action.consumerChain, uint(blockPerDistribution-currentBlock+1), 60*time.Second) + } + + tr.relayPackets(relayPacketsAction{chain: action.consumerChain, port: action.port, channel: action.channel}, verbose) + tr.waitBlocks(action.providerChain, 1, 10*time.Second) +} + type delegateTokensAction struct { chain chainID from validatorID @@ -736,3 +906,59 @@ func (tr TestRun) unjailValidator(action unjailValidatorAction, verbose bool) { log.Fatal(err, "\n", string(bz)) } } + +type registerRepresentativeAction struct { + chain chainID + representatives []validatorID + stakes []uint +} + +func (tr TestRun) registerRepresentative( + action registerRepresentativeAction, + verbose bool, +) { + var wg sync.WaitGroup + for i, val := range action.representatives { + wg.Add(1) + stake := action.stakes[i] + go func(val validatorID, stake uint) { + defer wg.Done() + + //#nosec G204 -- Bypass linter warning for spawning subprocess with pubKeycmd arguments. + pubKeycmd := exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[action.chain].binaryName, + "tendermint", "show-validator", + `--home`, tr.getValidatorHome(action.chain, val), + ) + + bzPubKey, err := pubKeycmd.CombinedOutput() + if err != nil { + log.Fatal(err, "\n", string(bzPubKey)) + } + + //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. 
+ bz, err := exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[action.chain].binaryName, + "tx", "staking", "create-validator", + `--amount`, fmt.Sprint(stake)+"stake", + `--pubkey`, string(bzPubKey), + `--moniker`, fmt.Sprint(val), + `--commission-rate`, "0.1", + `--commission-max-rate`, "0.2", + `--commission-max-change-rate`, "0.01", + `--min-self-delegation`, "1", + `--from`, `validator`+fmt.Sprint(val), + `--chain-id`, string(tr.chainConfigs[action.chain].chainId), + `--home`, tr.getValidatorHome(action.chain, val), + `--node`, tr.getValidatorNode(action.chain, val), + `--keyring-backend`, `test`, + `-b`, `block`, + `-y`, + ).CombinedOutput() + + if err != nil { + log.Fatal(err, "\n", string(bz)) + } + }(val, stake) + } + + wg.Wait() +} diff --git a/tests/integration/config.go b/tests/integration/config.go index 9fb890e246..ffebd002dc 100644 --- a/tests/integration/config.go +++ b/tests/integration/config.go @@ -116,6 +116,17 @@ func DefaultTestRun() TestRun { ".app_state.slashing.params.downtime_jail_duration = \"2s\" | " + ".app_state.slashing.params.slash_fraction_downtime = \"0.010000000000000000\"", }, + chainID("democ"): { + chainId: chainID("democ"), + binaryName: "interchain-security-cdd", + ipPrefix: "7.7.9", + votingWaitTime: 10, + genesisChanges: ".app_state.gov.voting_params.voting_period = \"10s\" | " + + ".app_state.slashing.params.signed_blocks_window = \"2\" | " + + ".app_state.slashing.params.min_signed_per_window = \"0.500000000000000000\" | " + + ".app_state.slashing.params.downtime_jail_duration = \"2s\" | " + + ".app_state.slashing.params.slash_fraction_downtime = \"0.010000000000000000\"", + }, }, } } diff --git a/tests/integration/main.go b/tests/integration/main.go index 67a4b35f48..fada60b41b 100644 --- a/tests/integration/main.go +++ b/tests/integration/main.go @@ -11,9 +11,10 @@ import ( "github.com/kylelemons/godebug/pretty" ) -var verbose = true +var verbose = false func main() { + fmt.Println("============================================ start happy path tests ============================================") start := time.Now() tr := DefaultTestRun() tr.ParseCLIFlags() @@ -24,7 +25,17 @@ func main() { tr.runStep(step, verbose) } - fmt.Printf("test successful - time elapsed %v\n", time.Since(start)) + fmt.Printf("happy path tests successful - time elapsed %v\n", time.Since(start)) + + fmt.Println("============================================ start democracy tests ============================================") + start = time.Now() + tr.startDocker() + + for _, step := range democracySteps { + tr.runStep(step, verbose) + } + + fmt.Printf("democracy tests successful - time elapsed %v\n", time.Since(start)) } func (tr TestRun) runStep(step Step, verbose bool) { @@ -37,7 +48,9 @@ func (tr TestRun) runStep(step Step, verbose bool) { case submitTextProposalAction: tr.submitTextProposal(action, verbose) case submitConsumerProposalAction: - tr.submitConsumerProposal(action, verbose) + tr.submitConsumerAdditionProposal(action, verbose) + case submitParamChangeProposalAction: + tr.submitParamChangeProposal(action, verbose) case voteGovProposalAction: tr.voteGovProposal(action, verbose) case startConsumerChainAction: @@ -48,8 +61,12 @@ func (tr TestRun) runStep(step Step, verbose bool) { tr.addIbcConnection(action, verbose) case addIbcChannelAction: tr.addIbcChannel(action, verbose) + case transferChannelCompleteAction: + tr.transferChannelComplete(action, verbose) case relayPacketsAction: tr.relayPackets(action, verbose) + case 
relayRewardPacketsToProviderAction: + tr.relayRewardPacketsToProvider(action, verbose) case delegateTokensAction: tr.delegateTokens(action, verbose) case unbondTokensAction: @@ -60,6 +77,8 @@ func (tr TestRun) runStep(step Step, verbose bool) { tr.invokeDowntimeSlash(action, verbose) case unjailValidatorAction: tr.unjailValidator(action, verbose) + case registerRepresentativeAction: + tr.registerRepresentative(action, verbose) default: log.Fatalf(fmt.Sprintf(`unknown action: %#v`, action)) } diff --git a/tests/integration/state.go b/tests/integration/state.go index b8768c6963..23498ce1e5 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -9,7 +9,7 @@ import ( "strings" "time" - clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" "github.com/tidwall/gjson" "gopkg.in/yaml.v2" ) @@ -17,9 +17,12 @@ import ( type State map[chainID]ChainState type ChainState struct { - ValBalances *map[validatorID]uint - Proposals *map[uint]Proposal - ValPowers *map[validatorID]uint + ValBalances *map[validatorID]uint + Proposals *map[uint]Proposal + ValPowers *map[validatorID]uint + RepresentativePowers *map[validatorID]uint + Params *[]Param + Rewards *Rewards } type Proposal interface { @@ -42,8 +45,34 @@ type ConsumerProposal struct { Status string } +type Rewards struct { + IsRewarded map[validatorID]bool + //if true it will calculate if the validator/delegator is rewarded between 2 successive blocks, + //otherwise it will calculate if it received any rewards since the 1st block + IsIncrementalReward bool + //if true checks rewards for "stake" token, otherwise checks rewards from + //other chains (e.g. false is used to check if provider received rewards from a consumer chain) + IsNativeDenom bool +} + func (p ConsumerProposal) isProposal() {} +type ParamsProposal struct { + Deposit uint + Status string + Subspace string + Key string + Value string +} + +func (p ParamsProposal) isProposal() {} + +type Param struct { + Subspace string + Key string + Value string +} + func (tr TestRun) getState(modelState State) State { systemState := State{} for k, modelState := range modelState { @@ -72,6 +101,21 @@ func (tr TestRun) getChainState(chain chainID, modelState ChainState) ChainState chainState.ValPowers = &powers } + if modelState.RepresentativePowers != nil { + representPowers := tr.getRepresentativePowers(chain, *modelState.RepresentativePowers) + chainState.RepresentativePowers = &representPowers + } + + if modelState.Params != nil { + params := tr.getParams(chain, *modelState.Params) + chainState.Params = ¶ms + } + + if modelState.Rewards != nil { + rewards := tr.getRewards(chain, *modelState.Rewards) + chainState.Rewards = &rewards + } + return chainState } @@ -141,6 +185,66 @@ func (tr TestRun) getValPowers(chain chainID, modelState map[validatorID]uint) m return actualState } +func (tr TestRun) getRepresentativePowers(chain chainID, modelState map[validatorID]uint) map[validatorID]uint { + actualState := map[validatorID]uint{} + for k := range modelState { + actualState[k] = tr.getRepresentativePower(chain, k) + } + + return actualState +} + +func (tr TestRun) getParams(chain chainID, modelState []Param) []Param { + actualState := []Param{} + for _, p := range modelState { + actualState = append(actualState, Param{Subspace: p.Subspace, Key: p.Key, Value: tr.getParam(chain, p)}) + } + + return actualState +} + +func (tr TestRun) getRewards(chain chainID, modelState Rewards) Rewards { + 
receivedRewards := map[validatorID]bool{} + + currentBlock := tr.getBlockHeight(chain) + tr.waitBlocks(chain, 1, 10*time.Second) + nextBlock := tr.getBlockHeight(chain) + tr.waitBlocks(chain, 1, 10*time.Second) + + if !modelState.IsIncrementalReward { + currentBlock = 1 + } + for k := range modelState.IsRewarded { + receivedRewards[k] = tr.getReward(chain, k, nextBlock, modelState.IsNativeDenom) > tr.getReward(chain, k, currentBlock, modelState.IsNativeDenom) + } + + return Rewards{IsRewarded: receivedRewards, IsIncrementalReward: modelState.IsIncrementalReward, IsNativeDenom: modelState.IsNativeDenom} +} + +func (tr TestRun) getReward(chain chainID, validator validatorID, blockHeight uint, isNativeDenom bool) float64 { + //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. + bz, err := exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[chain].binaryName, + + "query", "distribution", "rewards", + tr.validatorConfigs[validator].delAddress, + + `--height`, fmt.Sprint(blockHeight), + `--node`, tr.getValidatorNode(chain, tr.getDefaultValidator(chain)), + `-o`, `json`, + ).CombinedOutput() + + if err != nil { + log.Fatal(err, "\n", string(bz)) + } + + denomCondition := `total.#(denom!="stake").amount` + if isNativeDenom { + denomCondition = `total.#(denom=="stake").amount` + } + + return gjson.Get(string(bz), denomCondition).Float() +} + func (tr TestRun) getBalance(chain chainID, validator validatorID) uint { //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. bz, err := exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[chain].binaryName, @@ -200,7 +304,7 @@ func (tr TestRun) getProposal(chain chainID, proposal uint) Proposal { Title: title, Description: description, } - case "/interchain_security.ccv.provider.v1.CreateConsumerChainProposal": + case "/interchain_security.ccv.provider.v1.ConsumerAdditionProposal": chainId := gjson.Get(string(bz), `content.chain_id`).String() spawnTime := gjson.Get(string(bz), `content.spawn_time`).Time().Sub(tr.containerConfig.now) @@ -222,7 +326,14 @@ func (tr TestRun) getProposal(chain chainID, proposal uint) Proposal { RevisionHeight: gjson.Get(string(bz), `content.initial_height.revision_height`).Uint(), }, } - + case "/cosmos.params.v1beta1.ParameterChangeProposal": + return ParamsProposal{ + Deposit: uint(deposit), + Status: status, + Subspace: gjson.Get(string(bz), `content.changes.0.subspace`).String(), + Key: gjson.Get(string(bz), `content.changes.0.key`).String(), + Value: gjson.Get(string(bz), `content.changes.0.value`).String(), + } } log.Fatal("unknown proposal type", string(bz)) @@ -288,6 +399,47 @@ func (tr TestRun) getValPower(chain chainID, validator validatorID) uint { return 0 } +func (tr TestRun) getRepresentativePower(chain chainID, validator validatorID) uint { + //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. 
+ bz, err := exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[chain].binaryName, + + "query", "staking", "validator", + tr.validatorConfigs[validator].valoperAddress, + + `--node`, tr.getValidatorNode(chain, tr.getDefaultValidator(chain)), + `-o`, `json`, + ).CombinedOutput() + + if err != nil { + log.Fatal(err, "\n", string(bz)) + } + + amount := gjson.Get(string(bz), `tokens`) + + return uint(amount.Uint()) +} + +func (tr TestRun) getParam(chain chainID, param Param) string { + //#nosec G204 -- Bypass linter warning for spawning subprocess with cmd arguments. + bz, err := exec.Command("docker", "exec", tr.containerConfig.instanceName, tr.chainConfigs[chain].binaryName, + + "query", "params", "subspace", + param.Subspace, + param.Key, + + `--node`, tr.getValidatorNode(chain, tr.getDefaultValidator(chain)), + `-o`, `json`, + ).CombinedOutput() + + if err != nil { + log.Fatal(err, "\n", string(bz)) + } + + value := gjson.Get(string(bz), `value`) + + return value.String() +} + // Gets a default validator for txs and queries using the first subdirectory // of the directory of the input chain, which will be the home directory // of one of the validators. diff --git a/tests/integration/steps.go b/tests/integration/steps.go index 9c4889ad5f..e720bc698c 100644 --- a/tests/integration/steps.go +++ b/tests/integration/steps.go @@ -1,7 +1,7 @@ package main import ( - clienttypes "github.com/cosmos/ibc-go/modules/core/02-client/types" + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" ) type Step struct { diff --git a/tests/integration/steps_democracy.go b/tests/integration/steps_democracy.go new file mode 100644 index 0000000000..537411a450 --- /dev/null +++ b/tests/integration/steps_democracy.go @@ -0,0 +1,485 @@ +package main + +import ( + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" +) + +var democracySteps = []Step{ + { + action: StartChainAction{ + chain: chainID("provi"), + validators: []StartChainValidator{ + {id: validatorID("bob"), stake: 500000000, allocation: 10000000000}, + {id: validatorID("alice"), stake: 500000000, allocation: 10000000000}, + {id: validatorID("carol"), stake: 500000000, allocation: 10000000000}, + }, + genesisChanges: "", // No custom genesis changes for this action + skipGentx: false, + }, + state: State{ + chainID("provi"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9500000000, + validatorID("bob"): 9500000000, + }, + }, + }, + }, + { + action: SendTokensAction{ + chain: chainID("provi"), + from: validatorID("alice"), + to: validatorID("bob"), + amount: 2, + }, + state: State{ + chainID("provi"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9499999998, + validatorID("bob"): 9500000002, + }, + }, + }, + }, + { + action: submitConsumerProposalAction{ + chain: chainID("provi"), + from: validatorID("alice"), + deposit: 10000001, + consumerChain: chainID("democ"), + spawnTime: 0, + initialHeight: clienttypes.Height{RevisionNumber: 0, RevisionHeight: 1}, + }, + state: State{ + chainID("provi"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9489999997, + validatorID("bob"): 9500000002, + }, + Proposals: &map[uint]Proposal{ + 1: ConsumerProposal{ + Deposit: 10000001, + Chain: chainID("democ"), + SpawnTime: 0, + InitialHeight: clienttypes.Height{RevisionNumber: 0, RevisionHeight: 1}, + Status: "PROPOSAL_STATUS_VOTING_PERIOD", + }, + }, + }, + }, + }, + { + action: voteGovProposalAction{ + chain: 
chainID("provi"), + from: []validatorID{validatorID("alice"), validatorID("bob"), validatorID("carol")}, + vote: []string{"yes", "yes", "yes"}, + propNumber: 1, + }, + state: State{ + chainID("provi"): ChainState{ + Proposals: &map[uint]Proposal{ + 1: ConsumerProposal{ + Deposit: 10000001, + Chain: chainID("democ"), + SpawnTime: 0, + InitialHeight: clienttypes.Height{RevisionNumber: 0, RevisionHeight: 1}, + Status: "PROPOSAL_STATUS_PASSED", + }, + }, + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9499999998, + validatorID("bob"): 9500000002, + }, + }, + }, + }, + { + action: startConsumerChainAction{ + consumerChain: chainID("democ"), + providerChain: chainID("provi"), + genesisChanges: ".app_state.ccvconsumer.params.blocks_per_distribution_transmission = \"10\"", + validators: []StartChainValidator{ + {id: validatorID("carol"), stake: 500000000, allocation: 10000000000}, + {id: validatorID("alice"), stake: 500000000, allocation: 10000000000}, + {id: validatorID("bob"), stake: 500000000, allocation: 10000000000}, + }, + }, + state: State{ + chainID("provi"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9499999998, + validatorID("bob"): 9500000002, + }, + }, + chainID("democ"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 10000000000, + validatorID("bob"): 10000000000, + }, + }, + }, + }, + { + action: SendTokensAction{ + chain: chainID("democ"), + from: validatorID("alice"), + to: validatorID("bob"), + amount: 1, + }, + state: State{ + chainID("democ"): ChainState{ + // Tx on consumer chain should not go through before ICS channel is setup + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 10000000000, + validatorID("bob"): 10000000000, + }, + }, + }, + }, + { + action: addIbcConnectionAction{ + chainA: chainID("democ"), + chainB: chainID("provi"), + clientA: 0, + clientB: 0, + order: "ordered", + }, + state: State{}, + }, + { + action: addIbcChannelAction{ + chainA: chainID("democ"), + chainB: chainID("provi"), + connectionA: 0, + portA: "consumer", + portB: "provider", + order: "ordered", + }, + state: State{}, + }, + { + action: transferChannelCompleteAction{ + chainA: chainID("democ"), + chainB: chainID("provi"), + connectionA: 0, + portA: "transfer", + portB: "transfer", + order: "unordered", + channelA: 1, + channelB: 1, + }, + state: State{}, + }, + { + action: delegateTokensAction{ + chain: chainID("provi"), + from: validatorID("alice"), + to: validatorID("alice"), + amount: 11000000, + }, + state: State{ + chainID("provi"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 500, + validatorID("carol"): 500, + }, + }, + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 500, + validatorID("bob"): 500, + validatorID("carol"): 500, + }, + }, + }, + }, + { + action: SendTokensAction{ + chain: chainID("democ"), + from: validatorID("alice"), + to: validatorID("bob"), + amount: 1, + }, + state: State{ + chainID("democ"): ChainState{ + // Tx should not go through, ICS channel is not setup until first VSC packet has been relayed to consumer + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 10000000000, + validatorID("bob"): 10000000000, + }, + }, + }, + }, + { + action: relayPacketsAction{ + chain: chainID("provi"), + port: "provider", + channel: 0, + }, + state: State{ + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 500, + 
validatorID("carol"): 500, + }, + }, + }, + }, + { + action: SendTokensAction{ + chain: chainID("democ"), + from: validatorID("alice"), + to: validatorID("bob"), + amount: 1, + }, + state: State{ + chainID("democ"): ChainState{ + // Now tx should execute + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9999999999, + validatorID("bob"): 10000000001, + }, + }, + }, + }, + // sanity checks end here + { + action: registerRepresentativeAction{ + chain: chainID("democ"), + representatives: []validatorID{validatorID("alice"), validatorID("bob")}, + stakes: []uint{100000000, 40000000}, + }, + state: State{ + chainID("democ"): ChainState{ + RepresentativePowers: &map[validatorID]uint{ + validatorID("alice"): 100000000, + validatorID("bob"): 40000000, + }, + Rewards: &Rewards{ + IsRewarded: map[validatorID]bool{ + validatorID("alice"): true, + validatorID("bob"): true, + validatorID("carol"): false, + }, + IsIncrementalReward: true, + IsNativeDenom: true, + }, + }, + }, + }, + { + action: delegateTokensAction{ + chain: chainID("democ"), + from: validatorID("carol"), + to: validatorID("alice"), + amount: 500000, + }, + state: State{ + chainID("democ"): ChainState{ + //Check that delegators on gov-consumer chain can change representative powers + RepresentativePowers: &map[validatorID]uint{ + validatorID("alice"): 100500000, + validatorID("bob"): 40000000, + }, + // Check that delegating on gov-consumer does not change validator powers + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 500, + validatorID("carol"): 500, + }, + //Check that tokens are minted and distributed to representatives and their delegators + Rewards: &Rewards{ + IsRewarded: map[validatorID]bool{ + validatorID("alice"): true, + validatorID("bob"): true, + validatorID("carol"): true, + }, + IsIncrementalReward: true, + IsNativeDenom: true, + }, + }, + }, + }, + { + action: submitParamChangeProposalAction{ + chain: chainID("democ"), + from: validatorID("alice"), + deposit: 10000001, + subspace: "staking", + key: "MaxValidators", + value: 105, + }, + state: State{ + chainID("democ"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9889999998, + validatorID("bob"): 9960000001, + }, + Proposals: &map[uint]Proposal{ + 1: ParamsProposal{ + Deposit: 10000001, + Status: "PROPOSAL_STATUS_VOTING_PERIOD", + Subspace: "staking", + Key: "MaxValidators", + Value: "105", + }, + }, + }, + }, + }, + { + //Have accounts vote on something on the gov-consumer chain + action: voteGovProposalAction{ + chain: chainID("democ"), + from: []validatorID{validatorID("alice"), validatorID("bob")}, + vote: []string{"yes", "no"}, + propNumber: 1, + }, + state: State{ + chainID("democ"): ChainState{ + ValBalances: &map[validatorID]uint{ + validatorID("alice"): 9899999999, + validatorID("bob"): 9960000001, + }, + //Check that the parameter is changed on gov-consumer chain + Params: &([]Param{{Subspace: "staking", Key: "MaxValidators", Value: "105"}}), + }, + }, + }, + { + action: relayRewardPacketsToProviderAction{ + consumerChain: chainID("democ"), + providerChain: chainID("provi"), + port: "transfer", + channel: 1, + }, + state: State{ + chainID("provi"): ChainState{ + //Check that tokens are minted and sent to provider chain and distributed to validators and their delegators on provider chain + Rewards: &Rewards{ + IsRewarded: map[validatorID]bool{ + validatorID("alice"): true, + validatorID("bob"): true, + validatorID("carol"): true, + }, + IsIncrementalReward: false, + IsNativeDenom: 
false, + }, + }, + }, + }, + { + action: downtimeSlashAction{ + chain: chainID("democ"), + // TODO: First validator cannot be brought down until this issue is resolved: + // https://github.com/cosmos/interchain-security/issues/263 + validator: validatorID("bob"), + }, + state: State{ + // validator should be slashed on consumer, powers not affected on either chain yet + chainID("provi"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 500, + validatorID("carol"): 500, + }, + }, + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 500, + validatorID("carol"): 500, + }, + }, + }, + }, + { + action: relayPacketsAction{ + chain: chainID("provi"), + port: "provider", + channel: 0, + }, + state: State{ + chainID("provi"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + // Downtime jailing and corresponding voting power change are processed by provider + validatorID("bob"): 0, + validatorID("carol"): 500, + }, + }, + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 500, + validatorID("carol"): 500, + }, + }, + }, + }, + // A block is incremented each action, hence why VSC is committed on provider, + // and can now be relayed as packet to consumer + { + action: relayPacketsAction{ + chain: chainID("provi"), + port: "provider", + channel: 0, + }, + state: State{ + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + // VSC now seen on consumer + validatorID("bob"): 0, + validatorID("carol"): 500, + }, + }, + }, + }, + { + action: unjailValidatorAction{ + provider: chainID("provi"), + validator: validatorID("bob"), + }, + state: State{ + chainID("provi"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + // 1% of bob's stake should be slashed as set in config.go + validatorID("bob"): 495, + validatorID("carol"): 500, + }, + }, + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 0, + validatorID("carol"): 500, + }, + }, + }, + }, + { + action: relayPacketsAction{ + chain: chainID("provi"), + port: "provider", + channel: 0, + }, + state: State{ + chainID("democ"): ChainState{ + ValPowers: &map[validatorID]uint{ + validatorID("alice"): 511, + validatorID("bob"): 495, + validatorID("carol"): 500, + }, + //Check that slashing on the gov-consumer chain does not result in slashing for the representatives or their delegators + RepresentativePowers: &map[validatorID]uint{ + validatorID("alice"): 100500000, + validatorID("bob"): 40000000, + }, + }, + }, + }, +} diff --git a/testutil/keeper/expectations.go b/testutil/keeper/expectations.go new file mode 100644 index 0000000000..7390115c7a --- /dev/null +++ b/testutil/keeper/expectations.go @@ -0,0 +1,93 @@ +package keeper + +import ( + time "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" + conntypes "github.com/cosmos/ibc-go/v3/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types" + ibctmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" + "github.com/golang/mock/gomock" + + ccv "github.com/cosmos/interchain-security/x/ccv/types" + + extra "github.com/oxyno-zeta/gomock-extra-matcher" +) + 
+// +// A file containing groups of commonly used mock expectations. +// Note: Each group of mock expectations is associated with a single method +// that may be called during unit tests. +// + +// GetMocksForCreateConsumerClient returns mock expectations needed to call CreateConsumerClient(). +func GetMocksForCreateConsumerClient(ctx sdk.Context, mocks *MockedKeepers, + expectedChainID string, expectedLatestHeight clienttypes.Height) []*gomock.Call { + + expectations := []*gomock.Call{ + mocks.MockStakingKeeper.EXPECT().UnbondingTime(ctx).Return(time.Hour).Times( + 1, // called once in CreateConsumerClient + ), + + mocks.MockClientKeeper.EXPECT().CreateClient( + ctx, + // Allows us to expect a match by field. These are the only two client state values + // that are dependent on parameters passed to CreateConsumerClient. + extra.StructMatcher().Field( + "ChainId", expectedChainID).Field( + "LatestHeight", expectedLatestHeight, + ), + gomock.Any(), + ).Return("clientID", nil).Times(1), + } + + expectations = append(expectations, GetMocksForMakeConsumerGenesis(ctx, mocks, time.Hour)...) + return expectations +} + +// GetMocksForMakeConsumerGenesis returns mock expectations needed to call MakeConsumerGenesis(). +func GetMocksForMakeConsumerGenesis(ctx sdk.Context, mocks *MockedKeepers, + unbondingTimeToInject time.Duration) []*gomock.Call { + return []*gomock.Call{ + mocks.MockStakingKeeper.EXPECT().UnbondingTime(ctx).Return(unbondingTimeToInject).Times(1), + + mocks.MockClientKeeper.EXPECT().GetSelfConsensusState(ctx, + clienttypes.GetSelfHeight(ctx)).Return(&ibctmtypes.ConsensusState{}, nil).Times(1), + + mocks.MockStakingKeeper.EXPECT().IterateLastValidatorPowers(ctx, gomock.Any()).Times(1), + } +} + +// GetMocksForSetConsumerChain returns mock expectations needed to call SetConsumerChain(). +func GetMocksForSetConsumerChain(ctx sdk.Context, mocks *MockedKeepers, + chainIDToInject string) []*gomock.Call { + return []*gomock.Call{ + mocks.MockChannelKeeper.EXPECT().GetChannel(ctx, ccv.ProviderPortID, gomock.Any()).Return( + channeltypes.Channel{ + State: channeltypes.OPEN, + ConnectionHops: []string{"connectionID"}, + }, + true, + ).Times(1), + mocks.MockConnectionKeeper.EXPECT().GetConnection(ctx, "connectionID").Return( + conntypes.ConnectionEnd{ClientId: "clientID"}, true, + ).Times(1), + mocks.MockClientKeeper.EXPECT().GetClientState(ctx, "clientID").Return( + &ibctmtypes.ClientState{ChainId: chainIDToInject}, true, + ).Times(1), + } +} + +// GetMocksForStopConsumerChain returns mock expectations needed to call StopConsumerChain().
+func GetMocksForStopConsumerChain(ctx sdk.Context, mocks *MockedKeepers) []*gomock.Call { + dummyCap := &capabilitytypes.Capability{} + return []*gomock.Call{ + mocks.MockChannelKeeper.EXPECT().GetChannel(ctx, ccv.ProviderPortID, "channelID").Return( + channeltypes.Channel{State: channeltypes.OPEN}, true, + ).Times(1), + mocks.MockScopedKeeper.EXPECT().GetCapability(ctx, gomock.Any()).Return(dummyCap, true).Times(1), + mocks.MockChannelKeeper.EXPECT().ChanCloseInit(ctx, ccv.ProviderPortID, "channelID", dummyCap).Times(1), + } +} diff --git a/testutil/keeper/unit_test_helpers.go b/testutil/keeper/unit_test_helpers.go index a4dd366139..f2aeb2e3ac 100644 --- a/testutil/keeper/unit_test_helpers.go +++ b/testutil/keeper/unit_test_helpers.go @@ -2,17 +2,20 @@ package keeper import ( "testing" + time "time" + + tmtypes "github.com/tendermint/tendermint/types" "github.com/cosmos/cosmos-sdk/codec" codectypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/store" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" - capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" consumerkeeper "github.com/cosmos/interchain-security/x/ccv/consumer/keeper" providerkeeper "github.com/cosmos/interchain-security/x/ccv/provider/keeper" "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/log" @@ -22,169 +25,178 @@ import ( cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" cryptotypes "github.com/cosmos/cosmos-sdk/crypto/types" + + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" + commitmenttypes "github.com/cosmos/ibc-go/v3/modules/core/23-commitment/types" + ibctmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" + providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" ) -// Constructs a provider keeper and context object for unit tests, backed by an in-memory db. 
-func GetProviderKeeperAndCtx(t testing.TB) (providerkeeper.Keeper, sdk.Context) { +// Parameters needed to instantiate an in-memory keeper +type InMemKeeperParams struct { + Cdc *codec.ProtoCodec + StoreKey *storetypes.KVStoreKey + ParamsSubspace *paramstypes.Subspace + Ctx sdk.Context +} + +// NewInMemKeeperParams instantiates in-memory keeper params with default values +func NewInMemKeeperParams(t testing.TB) InMemKeeperParams { + storeKey := sdk.NewKVStoreKey(types.StoreKey) + memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) + + db := tmdb.NewMemDB() + stateStore := store.NewCommitMultiStore(db) + stateStore.MountStoreWithDB(storeKey, sdk.StoreTypeIAVL, db) + stateStore.MountStoreWithDB(memStoreKey, sdk.StoreTypeMemory, nil) + require.NoError(t, stateStore.LoadLatestVersion()) - cdc, storeKey, paramsSubspace, ctx := SetupInMemKeeper(t) + registry := codectypes.NewInterfaceRegistry() + cdc := codec.NewProtoCodec(registry) - k := providerkeeper.NewKeeper( - cdc, + paramsSubspace := paramstypes.NewSubspace(cdc, + codec.NewLegacyAmino(), storeKey, - paramsSubspace, - &MockScopedKeeper{}, - &MockChannelKeeper{}, - &MockPortKeeper{}, - &MockConnectionKeeper{}, - &MockClientKeeper{}, - &MockStakingKeeper{}, - &MockSlashingKeeper{}, - &MockAccountKeeper{}, - "", + memStoreKey, + paramstypes.ModuleName, ) - return k, ctx -} + ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) -// Constructs a consumer keeper and context object for unit tests, backed by an in-memory db. -func GetConsumerKeeperAndCtx(t testing.TB) (consumerkeeper.Keeper, sdk.Context) { + return InMemKeeperParams{ + Cdc: cdc, + StoreKey: storeKey, + ParamsSubspace: ¶msSubspace, + Ctx: ctx, + } +} - cdc, storeKey, paramsSubspace, ctx := SetupInMemKeeper(t) +// A struct holding pointers to any mocked external keeper needed for provider/consumer keeper setup. +type MockedKeepers struct { + *MockScopedKeeper + *MockChannelKeeper + *MockPortKeeper + *MockConnectionKeeper + *MockClientKeeper + *MockStakingKeeper + *MockSlashingKeeper + *MockAccountKeeper + *MockBankKeeper + *MockIBCTransferKeeper + *MockIBCCoreKeeper +} - k := consumerkeeper.NewKeeper( - cdc, - storeKey, - paramsSubspace, - &MockScopedKeeper{}, - &MockChannelKeeper{}, - &MockPortKeeper{}, - &MockConnectionKeeper{}, - &MockClientKeeper{}, - &MockSlashingKeeper{}, - &MockBankKeeper{}, - &MockAccountKeeper{}, - &MockIBCTransferKeeper{}, - &MockIBCCoreKeeper{}, - "", - ) - return k, ctx +// NewMockedKeepers instantiates a struct with pointers to properly instantiated mocked keepers. +func NewMockedKeepers(ctrl *gomock.Controller) MockedKeepers { + return MockedKeepers{ + MockScopedKeeper: NewMockScopedKeeper(ctrl), + MockChannelKeeper: NewMockChannelKeeper(ctrl), + MockPortKeeper: NewMockPortKeeper(ctrl), + MockConnectionKeeper: NewMockConnectionKeeper(ctrl), + MockClientKeeper: NewMockClientKeeper(ctrl), + MockStakingKeeper: NewMockStakingKeeper(ctrl), + MockSlashingKeeper: NewMockSlashingKeeper(ctrl), + MockAccountKeeper: NewMockAccountKeeper(ctrl), + MockBankKeeper: NewMockBankKeeper(ctrl), + MockIBCTransferKeeper: NewMockIBCTransferKeeper(ctrl), + MockIBCCoreKeeper: NewMockIBCCoreKeeper(ctrl), + } } -// Constructs a provider keeper for unit tests, backed by an in-memory db, -// with ability to pass mocked or otherwise manipulated parameters. 
-func GetProviderKeeperWithMocks( - cdc *codec.ProtoCodec, - storeKey *storetypes.KVStoreKey, - paramsSubspace paramstypes.Subspace, - capabilityKeeper capabilitykeeper.ScopedKeeper, - channelKeeper types.ChannelKeeper, - portKeeper types.PortKeeper, - connectionKeeper types.ConnectionKeeper, - clientKeeper types.ClientKeeper, - stakingKeeper types.StakingKeeper, - slashingKeeper types.SlashingKeeper, - accountKeeper types.AccountKeeper, -) providerkeeper.Keeper { +// NewInMemProviderKeeper instantiates an in-mem provider keeper from params and mocked keepers +func NewInMemProviderKeeper(params InMemKeeperParams, mocks MockedKeepers) providerkeeper.Keeper { return providerkeeper.NewKeeper( - cdc, - storeKey, - paramsSubspace, - capabilityKeeper, - channelKeeper, - portKeeper, - connectionKeeper, - clientKeeper, - stakingKeeper, - slashingKeeper, - accountKeeper, + params.Cdc, + params.StoreKey, + *params.ParamsSubspace, + mocks.MockScopedKeeper, + mocks.MockChannelKeeper, + mocks.MockPortKeeper, + mocks.MockConnectionKeeper, + mocks.MockClientKeeper, + mocks.MockStakingKeeper, + mocks.MockSlashingKeeper, + mocks.MockAccountKeeper, "", ) } -// Constructs a consumer keeper for unit tests, backed by an in-memory db, -// with ability to pass mocked or otherwise manipulated parameters. -func GetCustomConsumerKeeperWithMocks( - cdc *codec.ProtoCodec, - storeKey *storetypes.KVStoreKey, - paramsSubspace paramstypes.Subspace, - capabilityKeeper types.ScopedKeeper, - channelKeeper types.ChannelKeeper, - portKeeper types.PortKeeper, - connectionKeeper types.ConnectionKeeper, - clientKeeper types.ClientKeeper, - slashingKeeper types.SlashingKeeper, - bankKeeper types.BankKeeper, - accountKeeper types.AccountKeeper, - ibcTransferKeeper types.IBCTransferKeeper, - ibcCoreKeeper types.IBCCoreKeeper, -) consumerkeeper.Keeper { +// NewInMemConsumerKeeper instantiates an in-mem consumer keeper from params and mocked keepers +func NewInMemConsumerKeeper(params InMemKeeperParams, mocks MockedKeepers) consumerkeeper.Keeper { return consumerkeeper.NewKeeper( - cdc, - storeKey, - paramsSubspace, - capabilityKeeper, - channelKeeper, - portKeeper, - connectionKeeper, - clientKeeper, - slashingKeeper, - bankKeeper, - accountKeeper, - ibcTransferKeeper, - ibcCoreKeeper, + params.Cdc, + params.StoreKey, + *params.ParamsSubspace, + mocks.MockScopedKeeper, + mocks.MockChannelKeeper, + mocks.MockPortKeeper, + mocks.MockConnectionKeeper, + mocks.MockClientKeeper, + mocks.MockSlashingKeeper, + mocks.MockBankKeeper, + mocks.MockAccountKeeper, + mocks.MockIBCTransferKeeper, + mocks.MockIBCCoreKeeper, "", ) } -// Constructs a consumer keeper for unit tests, backed by an in-memory db, -// with ability to pass manipulated parameters, but no mocked keepers. -func GetCustomConsumerKeeper( - cdc *codec.ProtoCodec, - storeKey *storetypes.KVStoreKey, - paramsSubspace paramstypes.Subspace, -) consumerkeeper.Keeper { +// Returns an in-memory provider keeper, context, controller, and mocks, given a test instance and parameters. +// +// Note: Calling ctrl.Finish() at the end of a test function ensures that +// no unexpected calls to external keepers are made. 
+func GetProviderKeeperAndCtx(t *testing.T, params InMemKeeperParams) ( + providerkeeper.Keeper, sdk.Context, *gomock.Controller, MockedKeepers) { - return consumerkeeper.NewKeeper( - cdc, - storeKey, - paramsSubspace, - &MockScopedKeeper{}, - &MockChannelKeeper{}, - &MockPortKeeper{}, - &MockConnectionKeeper{}, - &MockClientKeeper{}, - &MockSlashingKeeper{}, - &MockBankKeeper{}, - &MockAccountKeeper{}, - &MockIBCTransferKeeper{}, - &MockIBCCoreKeeper{}, - "", - ) + ctrl := gomock.NewController(t) + mocks := NewMockedKeepers(ctrl) + return NewInMemProviderKeeper(params, mocks), params.Ctx, ctrl, mocks } -func SetupInMemKeeper(t testing.TB) (*codec.ProtoCodec, *storetypes.KVStoreKey, paramstypes.Subspace, sdk.Context) { - storeKey := sdk.NewKVStoreKey(types.StoreKey) - memStoreKey := storetypes.NewMemoryStoreKey(types.MemStoreKey) +// Return an in-memory consumer keeper, context, controller, and mocks, given a test instance and parameters. +// +// Note: Calling ctrl.Finish() at the end of a test function ensures that +// no unexpected calls to external keepers are made. +func GetConsumerKeeperAndCtx(t *testing.T, params InMemKeeperParams) ( + consumerkeeper.Keeper, sdk.Context, *gomock.Controller, MockedKeepers) { - db := tmdb.NewMemDB() - stateStore := store.NewCommitMultiStore(db) - stateStore.MountStoreWithDB(storeKey, sdk.StoreTypeIAVL, db) - stateStore.MountStoreWithDB(memStoreKey, sdk.StoreTypeMemory, nil) - require.NoError(t, stateStore.LoadLatestVersion()) + ctrl := gomock.NewController(t) + mocks := NewMockedKeepers(ctrl) + return NewInMemConsumerKeeper(params, mocks), params.Ctx, ctrl, mocks +} - registry := codectypes.NewInterfaceRegistry() - cdc := codec.NewProtoCodec(registry) +// Sets a template client state for a params subspace so that the provider's +// GetTemplateClient method will be satisfied. +func (params *InMemKeeperParams) SetTemplateClientState(customState *ibctmtypes.ClientState) { - paramsSubspace := paramstypes.NewSubspace(cdc, - codec.NewLegacyAmino(), - storeKey, - memStoreKey, - paramstypes.ModuleName, - ) - ctx := sdk.NewContext(stateStore, tmproto.Header{}, false, log.NewNopLogger()) - return cdc, storeKey, paramsSubspace, ctx + keyTable := paramstypes.NewKeyTable(paramstypes.NewParamSetPair( + providertypes.KeyTemplateClient, &ibctmtypes.ClientState{}, + func(value interface{}) error { return nil })) + + newSubspace := params.ParamsSubspace.WithKeyTable(keyTable) + params.ParamsSubspace = &newSubspace + + // Default template client state if none provided + if customState == nil { + customState = ibctmtypes.NewClientState("", ibctmtypes.DefaultTrustLevel, 0, 0, + time.Second*10, clienttypes.Height{}, commitmenttypes.GetSDKSpecs(), + []string{"upgrade", "upgradedIBCState"}, true, true) + } + + params.ParamsSubspace.Set(params.Ctx, providertypes.KeyTemplateClient, customState) +} + +// Registers proto interfaces for params.Cdc +// +// For now, we explicitly force certain unit tests to register sdk crypto interfaces. +// TODO: This function will be executed automatically once https://github.com/cosmos/interchain-security/issues/273 is solved. 
+func (params *InMemKeeperParams) RegisterSdkCryptoCodecInterfaces() { + ir := codectypes.NewInterfaceRegistry() + // Public key implementation registered here + cryptocodec.RegisterInterfaces(ir) + // Replace default cdc, with a custom (registered) codec + params.Cdc = codec.NewProtoCodec(ir) } type PrivateKey struct { @@ -196,3 +208,32 @@ func GenPubKey() (crypto.PubKey, error) { privKey := PrivateKey{ed25519.GenPrivKey()} return cryptocodec.ToTmPubKeyInterface(privKey.PrivKey.PubKey()) } + +func GetClientState(chainID string) *ibctmtypes.ClientState { + return ibctmtypes.NewClientState(chainID, ibctmtypes.DefaultTrustLevel, 0, 0, + time.Second*10, clienttypes.Height{}, commitmenttypes.GetSDKSpecs(), + []string{"upgrade", "upgradedIBCState"}, true, true) +} + +func GetConsensusState(clientID string, timestamp time.Time, vals ...*tmtypes.Validator) *ibctmtypes.ConsensusState { + return ibctmtypes.NewConsensusState(timestamp, commitmenttypes.NewMerkleRoot([]byte("apphash")), + tmtypes.NewValidatorSet(vals).Hash()[:]) +} + +// SetupForStoppingConsumerChain registers expected mock calls and corresponding state setup +// which asserts that a consumer chain was properly stopped from StopConsumerChain(). +func SetupForStoppingConsumerChain(t *testing.T, ctx sdk.Context, + providerKeeper *providerkeeper.Keeper, mocks MockedKeepers) { + + expectations := GetMocksForCreateConsumerClient(ctx, &mocks, + "chainID", clienttypes.NewHeight(2, 3)) + expectations = append(expectations, GetMocksForSetConsumerChain(ctx, &mocks, "chainID")...) + expectations = append(expectations, GetMocksForStopConsumerChain(ctx, &mocks)...) + + gomock.InOrder(expectations...) + + err := providerKeeper.CreateConsumerClient(ctx, "chainID", clienttypes.NewHeight(2, 3), false) + require.NoError(t, err) + err = providerKeeper.SetConsumerChain(ctx, "channelID") + require.NoError(t, err) +} diff --git a/testutil/simapp/simapp.go b/testutil/simapp/simapp.go index f18de89c9f..cd76500be6 100644 --- a/testutil/simapp/simapp.go +++ b/testutil/simapp/simapp.go @@ -13,6 +13,7 @@ import ( tmdb "github.com/tendermint/tm-db" appConsumer "github.com/cosmos/interchain-security/app/consumer" + appConsumerDemocracy "github.com/cosmos/interchain-security/app/consumer-democracy" appProvider "github.com/cosmos/interchain-security/app/provider" ) @@ -24,6 +25,14 @@ func SetupTestingappProvider() (ibctesting.TestingApp, map[string]json.RawMessag return testApp, appProvider.NewDefaultGenesisState(encoding.Marshaler) } +func SetupTestingAppConsumerDemocracy() (ibctesting.TestingApp, map[string]json.RawMessage) { + db := tmdb.NewMemDB() + // encCdc := app.MakeTestEncodingConfig() + encoding := cosmoscmd.MakeEncodingConfig(appConsumerDemocracy.ModuleBasics) + testApp := appConsumerDemocracy.New(log.NewNopLogger(), db, nil, true, map[int64]bool{}, simapp.DefaultNodeHome, 5, encoding, simapp.EmptyAppOptions{}).(ibctesting.TestingApp) + return testApp, appConsumerDemocracy.NewDefaultGenesisState(encoding.Marshaler) +} + func SetupTestingAppConsumer() (ibctesting.TestingApp, map[string]json.RawMessage) { db := tmdb.NewMemDB() // encCdc := app.MakeTestEncodingConfig() @@ -55,3 +64,16 @@ func NewProviderConsumerCoordinator(t *testing.T) (*ibctesting.Coordinator, *ibc consumerChain := coordinator.GetChain(chainID) return coordinator, providerChain, consumerChain } + +// NewCoordinator initializes Coordinator with provider and democracy consumer TestChains +func NewProviderConsumerDemocracyCoordinator(t *testing.T) (*ibctesting.Coordinator, 
*ibctesting.TestChain, *ibctesting.TestChain) { + coordinator := NewBasicCoordinator(t) + chainID := ibctesting.GetChainID(1) + coordinator.Chains[chainID] = ibctesting.NewTestChain(t, coordinator, SetupTestingappProvider, chainID) + providerChain := coordinator.GetChain(chainID) + chainID = ibctesting.GetChainID(2) + coordinator.Chains[chainID] = ibctesting.NewTestChainWithValSet(t, coordinator, + SetupTestingAppConsumerDemocracy, chainID, providerChain.Vals, providerChain.Signers) + consumerChain := coordinator.GetChain(chainID) + return coordinator, providerChain, consumerChain +} diff --git a/third_party/proto/cosmos/staking/v1beta1/staking.proto b/third_party/proto/cosmos/staking/v1beta1/staking.proto index 61fd247d1d..27581a12cf 100644 --- a/third_party/proto/cosmos/staking/v1beta1/staking.proto +++ b/third_party/proto/cosmos/staking/v1beta1/staking.proto @@ -9,6 +9,7 @@ import "google/protobuf/timestamp.proto"; import "cosmos_proto/cosmos.proto"; import "cosmos/base/v1beta1/coin.proto"; import "tendermint/types/types.proto"; +import "tendermint/abci/types.proto"; option go_package = "github.com/cosmos/cosmos-sdk/x/staking/types"; @@ -118,8 +119,11 @@ message Validator { (gogoproto.nullable) = false ]; - // True if this validator's unbonding has been stopped by an external module - bool unbonding_on_hold = 12; + // strictly positive if this validator's unbonding has been stopped by external modules + int64 unbonding_on_hold_ref_count = 12; + + // list of unbonding ids, each uniquely identifying an unbonding of this validator + repeated uint64 unbonding_ids = 13; } // BondStatus is the status of a validator. @@ -233,8 +237,8 @@ message UnbondingDelegationEntry { // Incrementing id that uniquely identifies this entry uint64 unbonding_id = 5; - // True if this entry's unbonding has been stopped by an external module - bool unbonding_on_hold = 6; + // Strictly positive if this entry's unbonding has been stopped by external modules + int64 unbonding_on_hold_ref_count = 6; } // RedelegationEntry defines a redelegation object with relevant metadata. @@ -260,8 +264,8 @@ message RedelegationEntry { // Incrementing id that uniquely identifies this entry uint64 unbonding_id = 5; - // True if this entry's unbonding has been stopped by an external module - bool unbonding_on_hold = 6; + // Strictly positive if this entry's unbonding has been stopped by external modules + int64 unbonding_on_hold_ref_count = 6; } // Redelegation contains the list of a particular delegator's redelegating bonds @@ -358,4 +362,9 @@ enum InfractionType { INFRACTION_TYPE_DOUBLE_SIGN = 1 [(gogoproto.enumvalue_customname) = "DoubleSign"]; // DOWNTIME defines a validator that missed signing too many blocks. INFRACTION_TYPE_DOWNTIME = 2 [(gogoproto.enumvalue_customname) = "Downtime"]; +} + +// ValidatorUpdates defines an array of abci.ValidatorUpdate objects.
+message ValidatorUpdates { + repeated tendermint.abci.ValidatorUpdate updates = 1 [(gogoproto.nullable) = false]; } \ No newline at end of file diff --git a/x/ccv/consumer/ibc_module.go b/x/ccv/consumer/ibc_module.go index 77527c34f2..b16457e2fb 100644 --- a/x/ccv/consumer/ibc_module.go +++ b/x/ccv/consumer/ibc_module.go @@ -55,7 +55,7 @@ func (am AppModule) OnChanOpenInit( return err } - return am.keeper.VerifyProviderChain(ctx, channelID, connectionHops) + return am.keeper.VerifyProviderChain(ctx, connectionHops) } // validateCCVChannelParams validates a ccv channel diff --git a/x/ccv/consumer/ibc_module_test.go b/x/ccv/consumer/ibc_module_test.go new file mode 100644 index 0000000000..67eed2568c --- /dev/null +++ b/x/ccv/consumer/ibc_module_test.go @@ -0,0 +1,374 @@ +package consumer_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + transfertypes "github.com/cosmos/ibc-go/v3/modules/apps/transfer/types" + conntypes "github.com/cosmos/ibc-go/v3/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/v3/modules/core/24-host" + testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + "github.com/cosmos/interchain-security/x/ccv/consumer" + consumerkeeper "github.com/cosmos/interchain-security/x/ccv/consumer/keeper" + providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" + ccv "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +// TestOnChanOpenInit validates the consumer's OnChanOpenInit implementation against the spec. +// Additional validation for VerifyProviderChain can be found in its unit test.
+// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-ccf-coinit1 +// Spec tag: [CCV-CCF-COINIT.1] +func TestOnChanOpenInit(t *testing.T) { + + // Params for the OnChanOpenInit method + type params struct { + ctx sdk.Context + order channeltypes.Order + connectionHops []string + portID string + channelID string + chanCap *capabilitytypes.Capability + counterparty channeltypes.Counterparty + version string + } + + testCases := []struct { + name string + // Test-case specific function that mutates method parameters and setups expected mock calls + setup func(*consumerkeeper.Keeper, *params, testkeeper.MockedKeepers) + expPass bool + }{ + { + "success", func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + gomock.InOrder( + mocks.MockScopedKeeper.EXPECT().ClaimCapability( + params.ctx, params.chanCap, host.ChannelCapabilityPath( + params.portID, params.channelID)).Return(nil).Times(1), + mocks.MockConnectionKeeper.EXPECT().GetConnection( + params.ctx, "connectionIDToProvider").Return( + conntypes.ConnectionEnd{ClientId: "clientIDToProvider"}, true).Times(1), + ) + }, true, + }, + { + "invalid: channel to provider already established", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + keeper.SetProviderChannel(params.ctx, "existingProviderChanID") + }, false, + }, + { + "invalid: UNORDERED channel", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + params.order = channeltypes.UNORDERED + }, false, + }, + { + "invalid port ID, not CCV port", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + params.portID = "someDingusPortID" + }, false, + }, + { + "invalid version", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + params.version = "someDingusVer" + }, false, + }, + { + "invalid counterparty port ID", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + params.counterparty.PortId = "someOtherDingusPortID" + }, false, + }, + { + "invalid clientID to provider", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + gomock.InOrder( + mocks.MockScopedKeeper.EXPECT().ClaimCapability( + params.ctx, params.chanCap, host.ChannelCapabilityPath( + params.portID, params.channelID)).Return(nil).Times(1), + mocks.MockConnectionKeeper.EXPECT().GetConnection( + params.ctx, "connectionIDToProvider").Return( + conntypes.ConnectionEnd{ClientId: "unexpectedClientID"}, true).Times(1), // unexpected clientID + ) + }, false, + }, + } + + for _, tc := range testCases { + + // Common setup + consumerKeeper, ctx, ctrl, mocks := testkeeper.GetConsumerKeeperAndCtx( + t, testkeeper.NewInMemKeeperParams(t)) + consumerModule := consumer.NewAppModule(consumerKeeper) + + consumerKeeper.SetPort(ctx, ccv.ConsumerPortID) + consumerKeeper.SetProviderClientID(ctx, "clientIDToProvider") + + // Instantiate valid params as default. Individual test cases mutate these as needed. 
+ params := params{ + ctx: ctx, + order: channeltypes.ORDERED, + connectionHops: []string{"connectionIDToProvider"}, + portID: ccv.ConsumerPortID, + channelID: "consumerChannelID", + chanCap: &capabilitytypes.Capability{}, + counterparty: channeltypes.NewCounterparty(ccv.ProviderPortID, "providerChannelID"), + version: ccv.Version, + } + + tc.setup(&consumerKeeper, ¶ms, mocks) + + err := consumerModule.OnChanOpenInit( + params.ctx, + params.order, + params.connectionHops, + params.portID, + params.channelID, + params.chanCap, + params.counterparty, + params.version, + ) + + if tc.expPass { + require.NoError(t, err) + } else { + require.Error(t, err) + } + // Confirm there are no unexpected external keeper calls + ctrl.Finish() + } +} + +// TestOnChanOpenTry validates the consumer's OnChanOpenTry implementation against the spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-ccf-cotry1 +// Spec tag: [CCV-CCF-COTRY.1] +func TestOnChanOpenTry(t *testing.T) { + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + // No external keeper methods should be called + defer ctrl.Finish() + consumerModule := consumer.NewAppModule(consumerKeeper) + + // OnOpenTry must error even with correct arguments + _, err := consumerModule.OnChanOpenTry( + ctx, + channeltypes.ORDERED, + []string{"connection-1"}, + ccv.ConsumerPortID, + "channel-1", + nil, + channeltypes.NewCounterparty(ccv.ProviderPortID, "channel-1"), + ccv.Version, + ) + require.Error(t, err, "OnChanOpenTry callback must error on consumer chain") +} + +// TestOnChanOpenAck validates the consumer's OnChanOpenAck implementation against the spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-ccf-coack1 +// Spec tag: [CCV-CCF-COACK.1] +func TestOnChanOpenAck(t *testing.T) { + + // Params for the OnChanOpenAck method + type params struct { + ctx sdk.Context + portID string + channelID string + counterpartyChannelID string + counterpartyMetadata string + } + + testCases := []struct { + name string + // Test-case specific function that mutates method parameters and setups expected mock calls + setup func(*consumerkeeper.Keeper, *params, testkeeper.MockedKeepers) + expPass bool + }{ + { + "success", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + // Expected msg + distrTransferMsg := channeltypes.NewMsgChannelOpenInit( + transfertypes.PortID, + transfertypes.Version, + channeltypes.UNORDERED, + []string{"connectionID"}, + transfertypes.PortID, + "", // signer unused + ) + + // Expected mock calls + gomock.InOrder( + mocks.MockChannelKeeper.EXPECT().GetChannel( + params.ctx, params.portID, params.channelID).Return(channeltypes.Channel{ + ConnectionHops: []string{"connectionID"}, + }, true).Times(1), + mocks.MockIBCCoreKeeper.EXPECT().ChannelOpenInit( + sdk.WrapSDKContext(params.ctx), distrTransferMsg).Return( + &channeltypes.MsgChannelOpenInitResponse{}, nil, + ).Times(1), + ) + }, + true, + }, + { + "invalid: provider channel already established", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + keeper.SetProviderChannel(params.ctx, "existingProviderChannelID") + }, false, + }, + { + "invalid: cannot unmarshal ack metadata ", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + params.counterpartyMetadata = "bunkData" + }, false, + }, + { + "invalid: 
mismatched serialized version", + func(keeper *consumerkeeper.Keeper, params *params, mocks testkeeper.MockedKeepers) { + md := providertypes.HandshakeMetadata{ + ProviderFeePoolAddr: "", // dummy address used + Version: "bunkVersion", + } + metadataBz, err := md.Marshal() + require.NoError(t, err) + params.counterpartyMetadata = string(metadataBz) + }, false, + }, + } + + for _, tc := range testCases { + // Common setup + consumerKeeper, ctx, ctrl, mocks := testkeeper.GetConsumerKeeperAndCtx( + t, testkeeper.NewInMemKeeperParams(t)) + consumerModule := consumer.NewAppModule(consumerKeeper) + + // Instantiate valid params as default. Individual test cases mutate these as needed. + params := params{ + ctx: ctx, + portID: ccv.ConsumerPortID, + channelID: "consumerCCVChannelID", + counterpartyChannelID: "providerCCVChannelID", + } + + metadata := providertypes.HandshakeMetadata{ + ProviderFeePoolAddr: "someAcct", + Version: ccv.Version, + } + + metadataBz, err := metadata.Marshal() + require.NoError(t, err) + + params.counterpartyMetadata = string(metadataBz) + + tc.setup(&consumerKeeper, ¶ms, mocks) + + err = consumerModule.OnChanOpenAck( + params.ctx, + params.portID, + params.channelID, + params.counterpartyChannelID, + params.counterpartyMetadata, + ) + + if tc.expPass { + require.NoError(t, err) + // Confirm address of the distribution module account (on provider) was persisted on consumer + distModuleAcct := consumerKeeper.GetProviderFeePoolAddrStr(ctx) + require.Equal(t, "someAcct", distModuleAcct) + } else { + require.Error(t, err) + } + // Confirm there are no unexpected external keeper calls + ctrl.Finish() + } +} + +// TestOnChanOpenConfirm validates the consumer's OnChanOpenConfirm implementation against the spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-ccf-coconfirm1 +// Spec tag: [CCV-CCF-COCONFIRM.1] +func TestOnChanOpenConfirm(t *testing.T) { + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + consumerModule := consumer.NewAppModule(consumerKeeper) + + err := consumerModule.OnChanOpenConfirm(ctx, ccv.ConsumerPortID, "channel-1") + require.Error(t, err, "OnChanOpenConfirm callback must error on consumer chain") +} + +// TestOnChanCloseInit validates the consumer's OnChanCloseInit implementation against the spec. 
+// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-ccf-ccinit1 +// Spec tag: [CCV-CCF-CCINIT.1] +func TestOnChanCloseInit(t *testing.T) { + + testCases := []struct { + name string + channelToClose string + establishedProviderExists bool + expPass bool + }{ + { + name: "No established provider channel, error returned disallowing closing of channel", + channelToClose: "someChannelID", + establishedProviderExists: false, + expPass: false, + }, + { + name: "Provider channel is established, User CANNOT close established provider channel", + channelToClose: "provider", + establishedProviderExists: true, + expPass: false, + }, + { + name: "User CAN close duplicate channel that is NOT established provider", + channelToClose: "someChannelID", + establishedProviderExists: true, + expPass: true, + }, + } + + for _, tc := range testCases { + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + consumerModule := consumer.NewAppModule(consumerKeeper) + + if tc.establishedProviderExists { + consumerKeeper.SetProviderChannel(ctx, "provider") + } + + err := consumerModule.OnChanCloseInit(ctx, "portID", tc.channelToClose) + + if tc.expPass { + require.NoError(t, err) + } else { + require.Error(t, err) + } + ctrl.Finish() + } +} + +// TestOnChanCloseConfirm validates the consumer's OnChanCloseConfirm implementation against the spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-ccconfirm1// Spec tag: [CCV-CCF-CCINIT.1] +// Spec tag: [CCV-PCF-CCCONFIRM.1] +func TestOnChanCloseConfirm(t *testing.T) { + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + + // No external keeper methods should be called + defer ctrl.Finish() + + consumerModule := consumer.NewAppModule(consumerKeeper) + + // Nothing happens, no error returned + err := consumerModule.OnChanCloseConfirm(ctx, "portID", "channelID") + require.NoError(t, err) +} diff --git a/x/ccv/consumer/keeper/genesis.go b/x/ccv/consumer/keeper/genesis.go index 93fa8d8b6b..74873b96b2 100644 --- a/x/ccv/consumer/keeper/genesis.go +++ b/x/ccv/consumer/keeper/genesis.go @@ -6,6 +6,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" ibctmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" + "github.com/cosmos/interchain-security/x/ccv/consumer/types" consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" ccv "github.com/cosmos/interchain-security/x/ccv/types" utils "github.com/cosmos/interchain-security/x/ccv/utils" @@ -17,6 +18,8 @@ import ( // InitGenesis initializes the CCV consumer state and binds to PortID. 
func (k Keeper) InitGenesis(ctx sdk.Context, state *consumertypes.GenesisState) []abci.ValidatorUpdate { k.SetParams(ctx, state.Params) + // TODO: Remove enabled flag and find a better way to setup e2e tests + // See: https://github.com/cosmos/interchain-security/issues/339 if !state.Params.Enabled { return nil } @@ -87,6 +90,11 @@ func (k Keeper) InitGenesis(ctx sdk.Context, state *consumertypes.GenesisState) unbondingTime := utils.ComputeConsumerUnbondingPeriod(tmClientState.UnbondingPeriod) k.SetUnbondingTime(ctx, unbondingTime) + // set height to valset update id mapping + for _, h2v := range state.HeightToValsetUpdateId { + k.SetHeightValsetUpdateID(ctx, h2v.Height, h2v.ValsetUpdateId) + } + // set provider client id k.SetProviderClientID(ctx, state.ProviderClientId) // set provider channel id. @@ -97,6 +105,7 @@ func (k Keeper) InitGenesis(ctx sdk.Context, state *consumertypes.GenesisState) } } + // populate cross chain validators states with initial valset k.ApplyCCValidatorChanges(ctx, state.InitialValSet) return state.InitialValSet @@ -104,56 +113,89 @@ func (k Keeper) InitGenesis(ctx sdk.Context, state *consumertypes.GenesisState) // ExportGenesis exports the CCV consumer state. If the channel has already been established, then we export // provider chain. Otherwise, this is still considered a new chain and we export latest client state. -func (k Keeper) ExportGenesis(ctx sdk.Context) *consumertypes.GenesisState { +func (k Keeper) ExportGenesis(ctx sdk.Context) (genesis *consumertypes.GenesisState) { params := k.GetParams(ctx) if !params.Enabled { return consumertypes.DefaultGenesisState() } + // export the current validator set + valset, err := k.GetValidatorUpdates(ctx) + if err != nil { + panic(fmt.Sprintf("fail to retrieve the validator set: %s", err)) + } + + // export all the states created after a provider channel got established if channelID, ok := k.GetProviderChannel(ctx); ok { clientID, ok := k.GetProviderClientID(ctx) if !ok { panic("provider client does not exist") } - // ValUpdates must be filled in off-line - gs := consumertypes.NewRestartGenesisState(clientID, channelID, nil, nil, params) - maturingPackets := []consumertypes.MaturingVSCPacket{} - cb := func(vscId, timeNs uint64) bool { - mat := consumertypes.MaturingVSCPacket{ + maturingPackets := []types.MaturingVSCPacket{} + k.IteratePacketMaturityTime(ctx, func(vscId, timeNs uint64) bool { + mat := types.MaturingVSCPacket{ VscId: vscId, MaturityTime: timeNs, } maturingPackets = append(maturingPackets, mat) return false - } - k.IteratePacketMaturityTime(ctx, cb) + }) - gs.MaturingPackets = maturingPackets - return gs - } - clientID, ok := k.GetProviderClientID(ctx) - // if provider clientID and channelID don't exist on the consumer chain, then CCV protocol is disabled for this chain - // return a disabled genesis state - if !ok { - return consumertypes.DefaultGenesisState() - } - cs, ok := k.clientKeeper.GetClientState(ctx, clientID) - if !ok { - panic("provider client not set on already running consumer chain") - } - tmCs, ok := cs.(*ibctmtypes.ClientState) - if !ok { - panic("provider client consensus state is not tendermint client state") - } - consState, ok := k.clientKeeper.GetLatestClientConsensusState(ctx, clientID) - if !ok { - panic("provider consensus state not set on already running consumer chain") - } - tmConsState, ok := consState.(*ibctmtypes.ConsensusState) - if !ok { - panic("provider consensus state is not tendermint consensus state") + heightToVCIDs := []types.HeightToValsetUpdateID{} + 
k.IterateHeightToValsetUpdateID(ctx, func(height, vscID uint64) bool { + hv := types.HeightToValsetUpdateID{ + Height: height, + ValsetUpdateId: vscID, + } + heightToVCIDs = append(heightToVCIDs, hv) + return true + }) + + outstandingDowntimes := []types.OutstandingDowntime{} + k.IterateOutstandingDowntime(ctx, func(addr string) bool { + od := types.OutstandingDowntime{ + ValidatorConsensusAddress: addr, + } + outstandingDowntimes = append(outstandingDowntimes, od) + return false + }) + + genesis = types.NewRestartGenesisState( + clientID, + channelID, + maturingPackets, + valset, + heightToVCIDs, + outstandingDowntimes, + params, + ) + } else { + clientID, ok := k.GetProviderClientID(ctx) + // if provider clientID and channelID don't exist on the consumer chain, then CCV protocol is disabled for this chain + // return a disabled genesis state + if !ok { + return consumertypes.DefaultGenesisState() + } + cs, ok := k.clientKeeper.GetClientState(ctx, clientID) + if !ok { + panic("provider client not set on already running consumer chain") + } + tmCs, ok := cs.(*ibctmtypes.ClientState) + if !ok { + panic("provider client consensus state is not tendermint client state") + } + consState, ok := k.clientKeeper.GetLatestClientConsensusState(ctx, clientID) + if !ok { + panic("provider consensus state not set on already running consumer chain") + } + tmConsState, ok := consState.(*ibctmtypes.ConsensusState) + if !ok { + panic("provider consensus state is not tendermint consensus state") + } + // export client states and pending slashing requests into a new chain genesis + genesis = consumertypes.NewInitialGenesisState(tmCs, tmConsState, valset, k.GetPendingSlashRequests(ctx), params) } - // ValUpdates must be filled in off-line - return consumertypes.NewInitialGenesisState(tmCs, tmConsState, nil, params) + + return } diff --git a/x/ccv/consumer/keeper/genesis_test.go b/x/ccv/consumer/keeper/genesis_test.go new file mode 100644 index 0000000000..4077858303 --- /dev/null +++ b/x/ccv/consumer/keeper/genesis_test.go @@ -0,0 +1,265 @@ +package keeper_test + +import ( + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + host "github.com/cosmos/ibc-go/v3/modules/core/24-host" + testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + consumerkeeper "github.com/cosmos/interchain-security/x/ccv/consumer/keeper" + "github.com/cosmos/interchain-security/x/ccv/consumer/types" + ccv "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + tmtypes "github.com/tendermint/tendermint/types" + + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + "github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + + testutil "github.com/cosmos/interchain-security/testutil/keeper" +) + +func TestInitGenesis(t *testing.T) { + + // store consumer chain states in variables + + // create channel and client IDs for the consumer + channelID := "channelID" + clientID := "tendermint-07" + + // generate validator public key + pubKey, err := testutil.GenPubKey() + require.NoError(t, err) + + // create validator set with single validator + validator := tmtypes.NewValidator(pubKey, 1) + + // create consensus state using a single validator + consensusState := testutil.GetConsensusState(clientID, time.Time{}, validator) + + slashRequests := 
consumertypes.SlashRequests{ + Requests: []consumertypes.SlashRequest{{Infraction: stakingtypes.Downtime}}, + } + matPacket := consumertypes.MaturingVSCPacket{ + VscId: uint64(1), + MaturityTime: uint64(time.Now().UnixNano()), + } + + // create parameters for a new chain + params := types.NewParams(true, types.DefaultBlocksPerDistributionTransmission, "", "") + + testCases := []struct { + name string + malleate func(sdk.Context, testutil.MockedKeepers) + genesis *consumertypes.GenesisState + assertStates func(sdk.Context, consumerkeeper.Keeper, *consumertypes.GenesisState) + }{ + { + name: "restart a new chain", + malleate: func(ctx sdk.Context, mocks testutil.MockedKeepers) { + gomock.InOrder( + expectGetCapabilityMock(ctx, mocks), + expectCreateClientMock(ctx, mocks, "", clientID, validator), + ) + }, + genesis: consumertypes.NewInitialGenesisState(testutil.GetClientState(""), consensusState, + []abci.ValidatorUpdate{tmtypes.TM2PB.ValidatorUpdate(validator)}, slashRequests, params), + + assertStates: func(ctx sdk.Context, ck consumerkeeper.Keeper, gs *consumertypes.GenesisState) { + require.Equal(t, gs.Params, ck.GetParams(ctx)) + require.Equal(t, ccv.ConsumerPortID, ck.GetPort(ctx)) + + ubdTime, found := ck.GetUnbondingTime(ctx) + require.True(t, found) + require.Equal(t, gs.ProviderClientState.UnbondingPeriod, ubdTime) + + require.Zero(t, ck.GetHeightValsetUpdateID(ctx, uint64(ctx.BlockHeight()))) + + cid, ok := ck.GetProviderClientID(ctx) + require.True(t, ok) + require.Equal(t, clientID, cid) + }, + }, { + name: "restart a chain with an already established channel", + malleate: func(ctx sdk.Context, mocks testutil.MockedKeepers) { + gomock.InOrder( + expectGetCapabilityMock(ctx, mocks), + expectLatestConsensusStateMock(ctx, mocks, clientID, validator), + expectGetClientStateMock(ctx, mocks, "", clientID), + ) + }, + genesis: consumertypes.NewRestartGenesisState(clientID, channelID, + []consumertypes.MaturingVSCPacket{matPacket}, + []abci.ValidatorUpdate{tmtypes.TM2PB.ValidatorUpdate(validator)}, + []consumertypes.HeightToValsetUpdateID{{ValsetUpdateId: matPacket.VscId, Height: uint64(0)}}, + []consumertypes.OutstandingDowntime{{ValidatorConsensusAddress: sdk.ConsAddress(validator.Bytes()).String()}}, + params, + ), + assertStates: func(ctx sdk.Context, ck consumerkeeper.Keeper, gs *consumertypes.GenesisState) { + require.Equal(t, gs.Params, ck.GetParams(ctx)) + require.Equal(t, ccv.ConsumerPortID, ck.GetPort(ctx)) + + ubdTime, found := ck.GetUnbondingTime(ctx) + require.True(t, found) + require.Equal(t, testutil.GetClientState("").UnbondingPeriod, ubdTime) + + // export states to genesis + require.Equal(t, matPacket.VscId, ck.GetHeightValsetUpdateID(ctx, uint64(0))) + + require.Equal(t, matPacket.MaturityTime, ck.GetPacketMaturityTime(ctx, matPacket.VscId)) + require.Equal(t, gs.Params, ck.GetParams(ctx)) + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + keeperParams := testkeeper.NewInMemKeeperParams(t) + // Explicitly register codec with public key interface + keeperParams.RegisterSdkCryptoCodecInterfaces() + consumerKeeper, ctx, ctrl, mocks := testkeeper.GetConsumerKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() + + // test setup + tc.malleate(ctx, mocks) + + // init the chain states using a genesis + consumerKeeper.InitGenesis(ctx, tc.genesis) + + // assert states + tc.assertStates(ctx, consumerKeeper, tc.genesis) + }) + } +} + +func TestExportGenesis(t *testing.T) { + + clientID := "tendermint-07" + channelID := "channelID" + + //
define the states exported into genesis + slashRequests := consumertypes.SlashRequests{ + Requests: []consumertypes.SlashRequest{{Infraction: stakingtypes.Downtime}}, + } + restartHeight := uint64(0) + matPacket := consumertypes.MaturingVSCPacket{ + VscId: uint64(1), + MaturityTime: uint64(time.Now().UnixNano()), + } + + params := types.NewParams(true, types.DefaultBlocksPerDistributionTransmission, "", "") + + // create a single validator + pubKey := ed25519.GenPrivKey().PubKey() + tmPK, err := cryptocodec.ToTmPubKeyInterface(pubKey) + require.NoError(t, err) + validator := tmtypes.NewValidator(tmPK, 1) + + // create consensus state using a single validator + consensusState := testutil.GetConsensusState(clientID, time.Time{}, validator) + + testCases := []struct { + name string + malleate func(sdk.Context, consumerkeeper.Keeper, testutil.MockedKeepers) + expGenesis *consumertypes.GenesisState + }{ + { + name: "export a new chain", + malleate: func(ctx sdk.Context, ck consumerkeeper.Keeper, mocks testutil.MockedKeepers) { + // populate the states used by a new consumer chain + cVal, err := consumertypes.NewCCValidator(validator.Address.Bytes(), 1, pubKey) + require.NoError(t, err) + ck.SetCCValidator(ctx, cVal) + ck.SetProviderClientID(ctx, clientID) + ck.SetPendingSlashRequests( + ctx, + slashRequests, + ) + + // set the mock calls executed during the export + gomock.InOrder( + expectGetClientStateMock(ctx, mocks, "", clientID), + expectLatestConsensusStateMock(ctx, mocks, clientID, validator), + ) + }, + + expGenesis: consumertypes.NewInitialGenesisState(testutil.GetClientState(""), consensusState, + []abci.ValidatorUpdate{tmtypes.TM2PB.ValidatorUpdate(validator)}, slashRequests, params), + }, + { + name: "export a chain that has an established CCV channel", + malleate: func(ctx sdk.Context, ck consumerkeeper.Keeper, mocks testutil.MockedKeepers) { + // populate the states used by a running chain + cVal, err := consumertypes.NewCCValidator(validator.Address.Bytes(), 1, pubKey) + require.NoError(t, err) + ck.SetCCValidator(ctx, cVal) + ck.SetOutstandingDowntime(ctx, sdk.ConsAddress(validator.Address.Bytes())) + + // populate the required states to simulate a completed handshake + ck.SetProviderClientID(ctx, clientID) + ck.SetProviderChannel(ctx, channelID) + ck.SetHeightValsetUpdateID(ctx, restartHeight, matPacket.VscId) + ck.SetPacketMaturityTime(ctx, matPacket.VscId, matPacket.MaturityTime) + }, + expGenesis: consumertypes.NewRestartGenesisState( + clientID, + channelID, + []consumertypes.MaturingVSCPacket{matPacket}, + []abci.ValidatorUpdate{tmtypes.TM2PB.ValidatorUpdate(validator)}, + []types.HeightToValsetUpdateID{{Height: restartHeight, ValsetUpdateId: matPacket.VscId}}, + []consumertypes.OutstandingDowntime{{ValidatorConsensusAddress: sdk.ConsAddress(validator.Address.Bytes()).String()}}, + params, + ), + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + + keeperParams := testkeeper.NewInMemKeeperParams(t) + // Explicitly register codec with public key interface + keeperParams.RegisterSdkCryptoCodecInterfaces() + consumerKeeper, ctx, ctrl, mocks := testkeeper.GetConsumerKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() + consumerKeeper.SetParams(ctx, params) + + // test setup + tc.malleate(ctx, consumerKeeper, mocks) + + // export state to genesis + gotGen := consumerKeeper.ExportGenesis(ctx) + + // check the obtained genesis + require.EqualValues(t, tc.expGenesis, gotGen) + }) + } +} + +func expectLatestConsensusStateMock(ctx sdk.Context, mocks
testutil.MockedKeepers, clientID string, vals ...*tmtypes.Validator) *gomock.Call { + consState := testutil.GetConsensusState(clientID, time.Time{}, vals...) + return mocks.MockClientKeeper.EXPECT(). + GetLatestClientConsensusState(ctx, clientID).Return(consState, true).Times(1) +} + +func expectGetClientStateMock(ctx sdk.Context, mocks testutil.MockedKeepers, chainID, clientID string) *gomock.Call { + cs := testutil.GetClientState(chainID) + return mocks.MockClientKeeper.EXPECT().GetClientState(ctx, clientID).Return(cs, true).Times(1) +} + +func expectCreateClientMock(ctx sdk.Context, mocks testutil.MockedKeepers, chainID, clientID string, vals ...*tmtypes.Validator) *gomock.Call { + cs := testutil.GetClientState(chainID) + consState := testutil.GetConsensusState(clientID, time.Time{}, vals...) + + return mocks.MockClientKeeper.EXPECT().CreateClient(ctx, cs, consState).Return(clientID, nil).Times(1) +} + +func expectGetCapabilityMock(ctx sdk.Context, mocks testutil.MockedKeepers) *gomock.Call { + return mocks.MockScopedKeeper.EXPECT().GetCapability( + ctx, host.PortPath(ccv.ConsumerPortID), + ).Return(nil, true).Times(1) +} diff --git a/x/ccv/consumer/keeper/keeper.go b/x/ccv/consumer/keeper/keeper.go index f0dfb9edd5..c7433ffaf0 100644 --- a/x/ccv/consumer/keeper/keeper.go +++ b/x/ccv/consumer/keeper/keeper.go @@ -1,9 +1,7 @@ package keeper import ( - "bytes" "encoding/binary" - "encoding/json" "fmt" "time" @@ -122,7 +120,7 @@ func (k Keeper) GetPort(ctx sdk.Context) string { return string(store.Get(types.PortKey())) } -// SetPort sets the portID for the transfer module. Used in InitGenesis +// SetPort sets the portID for the CCV module. Used in InitGenesis func (k Keeper) SetPort(ctx sdk.Context, portID string) { store := ctx.KVStore(k.storeKey) store.Set(types.PortKey(), []byte(portID)) @@ -133,8 +131,7 @@ func (k Keeper) AuthenticateCapability(ctx sdk.Context, cap *capabilitytypes.Cap return k.scopedKeeper.AuthenticateCapability(ctx, cap, name) } -// ClaimCapability allows the transfer module that can claim a capability that IBC module -// passes to it +// ClaimCapability claims a capability that the IBC module passes to it func (k Keeper) ClaimCapability(ctx sdk.Context, cap *capabilitytypes.Capability, name string) error { return k.scopedKeeper.ClaimCapability(ctx, cap, name) } @@ -163,14 +160,14 @@ func (k Keeper) DeleteUnbondingTime(ctx sdk.Context) { store.Delete(types.UnbondingTimeKey()) } -// SetProviderClientID sets the provider clientID that is validating the chain. +// SetProviderClientID sets the clientID for the client to the provider. // Set in InitGenesis func (k Keeper) SetProviderClientID(ctx sdk.Context, clientID string) { store := ctx.KVStore(k.storeKey) store.Set(types.ProviderClientIDKey(), []byte(clientID)) } -// GetProviderClientID gets the provider clientID that is validating the chain. +// GetProviderClientID gets the clientID for the client to the provider. func (k Keeper) GetProviderClientID(ctx sdk.Context) (string, bool) { store := ctx.KVStore(k.storeKey) clientIdBytes := store.Get(types.ProviderClientIDKey()) @@ -180,13 +177,13 @@ func (k Keeper) GetProviderClientID(ctx sdk.Context) (string, bool) { return string(clientIdBytes), true } -// SetProviderChannel sets the provider channelID that is validating the chain. +// SetProviderChannel sets the channelID for the channel to the provider. 
func (k Keeper) SetProviderChannel(ctx sdk.Context, channelID string) { store := ctx.KVStore(k.storeKey) store.Set(types.ProviderChannelKey(), []byte(channelID)) } -// GetProviderChannel gets the provider channelID that is validating the chain. +// GetProviderChannel gets the channelID for the channel to the provider. func (k Keeper) GetProviderChannel(ctx sdk.Context) (string, bool) { store := ctx.KVStore(k.storeKey) channelIdBytes := store.Get(types.ProviderChannelKey()) @@ -196,7 +193,7 @@ func (k Keeper) GetProviderChannel(ctx sdk.Context) (string, bool) { return string(channelIdBytes), true } -// DeleteProviderChannel deletes the provider channel ID that is validating the chain. +// DeleteProviderChannel deletes the channelID for the channel to the provider. func (k Keeper) DeleteProviderChannel(ctx sdk.Context) { store := ctx.KVStore(k.storeKey) store.Delete(types.ProviderChannelKey()) @@ -270,7 +267,7 @@ func (k Keeper) GetPacketMaturityTime(ctx sdk.Context, vscId uint64) uint64 { return binary.BigEndian.Uint64(bz) } -// DeletePacketMaturityTime deletes the the maturity time for a given received VSC packet id +// DeletePacketMaturityTime deletes the packet maturity time for a given received VSC packet id func (k Keeper) DeletePacketMaturityTime(ctx sdk.Context, vscId uint64) { store := ctx.KVStore(k.storeKey) store.Delete(types.PacketMaturityTimeKey(vscId)) @@ -278,7 +275,7 @@ func (k Keeper) DeletePacketMaturityTime(ctx sdk.Context, vscId uint64) { // VerifyProviderChain verifies that the chain trying to connect on the channel handshake // is the expected provider chain. -func (k Keeper) VerifyProviderChain(ctx sdk.Context, channelID string, connectionHops []string) error { +func (k Keeper) VerifyProviderChain(ctx sdk.Context, connectionHops []string) error { if len(connectionHops) != 1 { return sdkerrors.Wrap(channeltypes.ErrTooManyConnectionHops, "must have direct connection to provider chain") } @@ -323,6 +320,24 @@ func (k Keeper) DeleteHeightValsetUpdateID(ctx sdk.Context, height uint64) { store.Delete(types.HeightValsetUpdateIDKey(height)) } +// IterateHeightToValsetUpdateID iterates over the block height to valset update ID mapping in store +func (k Keeper) IterateHeightToValsetUpdateID(ctx sdk.Context, cb func(height, vscID uint64) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.HeightValsetUpdateIDBytePrefix}) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + heightBytes := iterator.Key()[1:] + height := binary.BigEndian.Uint64(heightBytes) + + vscID := binary.BigEndian.Uint64(iterator.Value()) + + if !cb(height, vscID) { + break + } + } +} + // OutstandingDowntime returns the outstanding downtime flag for a given validator func (k Keeper) OutstandingDowntime(ctx sdk.Context, address sdk.ConsAddress) bool { store := ctx.KVStore(k.storeKey) @@ -336,9 +351,9 @@ func (k Keeper) SetOutstandingDowntime(ctx sdk.Context, address sdk.ConsAddress) store.Set(types.OutstandingDowntimeKey(address), []byte{}) } -// ClearOutstandingDowntime clears the outstanding downtime flag for a given validator -func (k Keeper) ClearOutstandingDowntime(ctx sdk.Context, address string) { - consAddr, err := sdk.ConsAddressFromBech32(address) +// DeleteOutstandingDowntime deletes the outstanding downtime flag for the given validator consensus address +func (k Keeper) DeleteOutstandingDowntime(ctx sdk.Context, consAddress string) { + consAddr, err := sdk.ConsAddressFromBech32(consAddress) if err != nil { return } @@ -346,6 
+361,21 @@ func (k Keeper) ClearOutstandingDowntime(ctx sdk.Context, address string) { store.Delete(types.OutstandingDowntimeKey(consAddr)) } +// IterateOutstandingDowntime iterates over the validator addresses of outstanding downtime flags +func (k Keeper) IterateOutstandingDowntime(ctx sdk.Context, cb func(address string) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.OutstandingDowntimeBytePrefix}) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + addrBytes := iterator.Key()[1:] + addr := sdk.ConsAddress(addrBytes).String() + if !cb(addr) { + break + } + } +} + // SetCCValidator sets a cross-chain validator under its validator address func (k Keeper) SetCCValidator(ctx sdk.Context, v types.CrossChainValidator) { store := ctx.KVStore(k.storeKey) @@ -389,41 +419,42 @@ func (k Keeper) GetAllCCValidator(ctx sdk.Context) (validators []types.CrossChai } // SetPendingSlashRequests sets the pending slash requests in store -func (k Keeper) SetPendingSlashRequests(ctx sdk.Context, requests []types.SlashRequest) { +func (k Keeper) SetPendingSlashRequests(ctx sdk.Context, requests types.SlashRequests) { store := ctx.KVStore(k.storeKey) - buf := &bytes.Buffer{} - err := json.NewEncoder(buf).Encode(&requests) + bz, err := requests.Marshal() if err != nil { panic(fmt.Errorf("failed to encode slash request json: %w", err)) } - store.Set([]byte{types.PendingSlashRequestsBytePrefix}, buf.Bytes()) + store.Set([]byte{types.PendingSlashRequestsBytePrefix}, bz) } // GetPendingSlashRequest returns the pending slash requests in store -func (k Keeper) GetPendingSlashRequests(ctx sdk.Context) (requests []types.SlashRequest) { +func (k Keeper) GetPendingSlashRequests(ctx sdk.Context) types.SlashRequests { store := ctx.KVStore(k.storeKey) bz := store.Get([]byte{types.PendingSlashRequestsBytePrefix}) if bz == nil { - return + return types.SlashRequests{} } - buf := bytes.NewBuffer(bz) - err := json.NewDecoder(buf).Decode(&requests) + + var sr types.SlashRequests + err := sr.Unmarshal(bz) if err != nil { panic(fmt.Errorf("failed to decode slash request json: %w", err)) } - return -} - -// AppendPendingSlashRequests appends the given slash request to the pending slash requests in store -func (k Keeper) AppendPendingSlashRequests(ctx sdk.Context, req types.SlashRequest) { - requests := k.GetPendingSlashRequests(ctx) - requests = append(requests, req) - k.SetPendingSlashRequests(ctx, requests) + return sr } // ClearPendingSlashRequests clears the pending slash requests in store -func (k Keeper) ClearPendingSlashRequests(ctx sdk.Context) { +func (k Keeper) DeletePendingSlashRequests(ctx sdk.Context) { store := ctx.KVStore(k.storeKey) store.Delete([]byte{types.PendingSlashRequestsBytePrefix}) } + +// AppendPendingSlashRequests appends the given slash request to the pending slash requests in store +func (k Keeper) AppendPendingSlashRequests(ctx sdk.Context, req types.SlashRequest) { + sr := k.GetPendingSlashRequests(ctx) + srArray := sr.GetRequests() + srArray = append(srArray, req) + k.SetPendingSlashRequests(ctx, types.SlashRequests{Requests: srArray}) +} diff --git a/x/ccv/consumer/keeper/keeper_test.go b/x/ccv/consumer/keeper/keeper_test.go index 6c40a6b0bd..67a5db53ca 100644 --- a/x/ccv/consumer/keeper/keeper_test.go +++ b/x/ccv/consumer/keeper/keeper_test.go @@ -4,12 +4,13 @@ import ( "testing" "time" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" 
"github.com/cosmos/cosmos-sdk/crypto/keys/ed25519" + sdk "github.com/cosmos/cosmos-sdk/types" + conntypes "github.com/cosmos/ibc-go/v3/modules/core/03-connection/types" testkeeper "github.com/cosmos/interchain-security/testutil/keeper" "github.com/cosmos/interchain-security/x/ccv/consumer/types" ccv "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" @@ -18,7 +19,10 @@ import ( // TestUnbondingTime tests getter and setter functionality for the unbonding period of a consumer chain func TestUnbondingTime(t *testing.T) { - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + _, ok := consumerKeeper.GetUnbondingTime(ctx) require.False(t, ok) unbondingPeriod := time.Hour * 24 * 7 * 3 @@ -30,7 +34,10 @@ func TestUnbondingTime(t *testing.T) { // TestProviderClientID tests getter and setter functionality for the client ID stored on consumer keeper func TestProviderClientID(t *testing.T) { - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + _, ok := consumerKeeper.GetProviderClientID(ctx) require.False(t, ok) consumerKeeper.SetProviderClientID(ctx, "someClientID") @@ -41,7 +48,10 @@ func TestProviderClientID(t *testing.T) { // TestProviderChannel tests getter and setter functionality for the channel ID stored on consumer keeper func TestProviderChannel(t *testing.T) { - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + _, ok := consumerKeeper.GetProviderChannel(ctx) require.False(t, ok) consumerKeeper.SetProviderChannel(ctx, "channelID") @@ -72,7 +82,9 @@ func TestPendingChanges(t *testing.T) { nil, ) - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + err = consumerKeeper.SetPendingChanges(ctx, pd) require.NoError(t, err) gotPd, ok := consumerKeeper.GetPendingChanges(ctx) @@ -86,7 +98,10 @@ func TestPendingChanges(t *testing.T) { // TestPacketMaturityTime tests getter, setter, and iterator functionality for the packet maturity time of a received VSC packet func TestPacketMaturityTime(t *testing.T) { - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + consumerKeeper.SetPacketMaturityTime(ctx, 1, 10) consumerKeeper.SetPacketMaturityTime(ctx, 2, 25) consumerKeeper.SetPacketMaturityTime(ctx, 5, 15) @@ -115,20 +130,11 @@ func TestPacketMaturityTime(t *testing.T) { // TestCrossChainValidator tests the getter, setter, and deletion method for cross chain validator records func TestCrossChainValidator(t *testing.T) { - // Construct a keeper with a custom codec - // TODO: Ensure all custom interfaces are registered in prod, see https://github.com/cosmos/interchain-security/issues/273 - _, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) - ir := codectypes.NewInterfaceRegistry() - - // Public key implementation must be registered - cryptocodec.RegisterInterfaces(ir) 
- cdc := codec.NewProtoCodec(ir) - - consumerKeeper := testkeeper.GetCustomConsumerKeeper( - cdc, - storeKey, - paramsSubspace, - ) + keeperParams := testkeeper.NewInMemKeeperParams(t) + // Explicitly register codec with public key interface + keeperParams.RegisterSdkCryptoCodecInterfaces() + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() // should return false _, found := consumerKeeper.GetCCValidator(ctx, ed25519.GenPrivKey().PubKey().Address()) @@ -165,13 +171,15 @@ func TestCrossChainValidator(t *testing.T) { // TestPendingSlashRequests tests the getter, setter, appending method, and deletion method for pending slash requests func TestPendingSlashRequests(t *testing.T) { - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() // prepare test setup by storing 10 pending slash requests - request := []types.SlashRequest{} + requests := []types.SlashRequest{} for i := 0; i < 10; i++ { - request = append(request, types.SlashRequest{}) - consumerKeeper.SetPendingSlashRequests(ctx, request) + requests = append(requests, types.SlashRequest{}) + consumerKeeper.SetPendingSlashRequests(ctx, types.SlashRequests{Requests: requests}) } // test set, append and clear operations @@ -185,7 +193,7 @@ func TestPendingSlashRequests(t *testing.T) { operation: func() { consumerKeeper.AppendPendingSlashRequests(ctx, types.SlashRequest{}) }, expLen: 11, }, { - operation: func() { consumerKeeper.ClearPendingSlashRequests(ctx) }, + operation: func() { consumerKeeper.DeletePendingSlashRequests(ctx) }, expLen: 0, }, } @@ -193,6 +201,89 @@ func TestPendingSlashRequests(t *testing.T) { for _, tc := range testCases { tc.operation() requests := consumerKeeper.GetPendingSlashRequests(ctx) - require.Len(t, requests, tc.expLen) + require.Len(t, requests.Requests, tc.expLen) + } +} + +// TestVerifyProviderChain tests the VerifyProviderChain method for the consumer keeper +func TestVerifyProviderChain(t *testing.T) { + + testCases := []struct { + name string + // State-mutating setup specific to this test case + mockSetup func(sdk.Context, testkeeper.MockedKeepers) + connectionHops []string + expError bool + }{ + { + name: "success", + mockSetup: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) { + gomock.InOrder( + mocks.MockConnectionKeeper.EXPECT().GetConnection( + ctx, "connectionID", + ).Return(conntypes.ConnectionEnd{ClientId: "clientID"}, true).Times(1), + ) + }, + connectionHops: []string{"connectionID"}, + expError: false, + }, + { + name: "connection hops is not length 1", + mockSetup: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) { + // Expect no calls to GetConnection(), VerifyProviderChain will return from first step. 
+ gomock.InAnyOrder( + mocks.MockConnectionKeeper.EXPECT().GetConnection(gomock.Any(), gomock.Any()).Times(0), + ) + }, + connectionHops: []string{"connectionID", "otherConnID"}, + expError: true, + }, + { + name: "connection does not exist", + mockSetup: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) { + gomock.InOrder( + mocks.MockConnectionKeeper.EXPECT().GetConnection( + ctx, "connectionID").Return(conntypes.ConnectionEnd{}, + false, // Found is returned as false + ).Times(1), + ) + }, + connectionHops: []string{"connectionID"}, + expError: true, + }, + { + name: "found clientID does not match expectation", + mockSetup: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) { + gomock.InOrder( + mocks.MockConnectionKeeper.EXPECT().GetConnection( + ctx, "connectionID").Return( + conntypes.ConnectionEnd{ClientId: "unexpectedClientID"}, true, + ).Times(1), + ) + }, + connectionHops: []string{"connectionID"}, + expError: true, + }, + } + + for _, tc := range testCases { + + keeperParams := testkeeper.NewInMemKeeperParams(t) + consumerKeeper, ctx, ctrl, mocks := testkeeper.GetConsumerKeeperAndCtx(t, keeperParams) + + // Common setup + consumerKeeper.SetProviderClientID(ctx, "clientID") // Set expected provider clientID + + // Specific mock setup + tc.mockSetup(ctx, mocks) + + err := consumerKeeper.VerifyProviderChain(ctx, tc.connectionHops) + + if tc.expError { + require.Error(t, err, "invalid case did not return error") + } else { + require.NoError(t, err, "valid case returned error") + } + ctrl.Finish() } } diff --git a/x/ccv/consumer/keeper/params_test.go b/x/ccv/consumer/keeper/params_test.go index 3de23cb73f..0bc043d984 100644 --- a/x/ccv/consumer/keeper/params_test.go +++ b/x/ccv/consumer/keeper/params_test.go @@ -10,7 +10,8 @@ import ( // TestParams tests the default params set for a consumer chain, and related getters/setters func TestParams(t *testing.T) { - consumerKeeper, ctx := testkeeper.GetConsumerKeeperAndCtx(t) + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() consumerKeeper.SetParams(ctx, types.DefaultParams()) expParams := types.NewParams(false, 1000, "", "") // these are the default params, IBC suite independently sets enabled=true diff --git a/x/ccv/consumer/keeper/relay.go b/x/ccv/consumer/keeper/relay.go index 65f607bc17..b32ee25d8b 100644 --- a/x/ccv/consumer/keeper/relay.go +++ b/x/ccv/consumer/keeper/relay.go @@ -64,9 +64,10 @@ func (k Keeper) OnRecvVSCPacket(ctx sdk.Context, packet channeltypes.Packet, new // set height to VSC id mapping k.SetHeightValsetUpdateID(ctx, uint64(ctx.BlockHeight())+1, newChanges.ValsetUpdateId) - // set outstanding slashing flags to false + // remove outstanding slashing flags of the validators + // for which the slashing was acknowledged by the provider chain for _, addr := range newChanges.GetSlashAcks() { - k.ClearOutstandingDowntime(ctx, addr) + k.DeleteOutstandingDowntime(ctx, addr) } ack := channeltypes.NewResultAcknowledgement([]byte{byte(1)}) @@ -172,7 +173,7 @@ func (k Keeper) SendPendingSlashRequests(ctx sdk.Context) { } // iterate over pending slash requests in reverse order - requests := k.GetPendingSlashRequests(ctx) + requests := k.GetPendingSlashRequests(ctx).Requests for i := len(requests) - 1; i >= 0; i-- { slashReq := requests[i] @@ -201,7 +202,7 @@ func (k Keeper) SendPendingSlashRequests(ctx sdk.Context) { } // clear pending slash requests - k.ClearPendingSlashRequests(ctx) + k.DeletePendingSlashRequests(ctx) } // 
OnAcknowledgementPacket executes application logic for acknowledgments of sent VSCMatured and Slash packets diff --git a/x/ccv/consumer/keeper/relay_test.go b/x/ccv/consumer/keeper/relay_test.go index 95e5b7a2cd..028675793c 100644 --- a/x/ccv/consumer/keeper/relay_test.go +++ b/x/ccv/consumer/keeper/relay_test.go @@ -109,29 +109,8 @@ func TestOnRecvVSCPacket(t *testing.T) { }, } - // Instantiate custom keeper with mocks - ctrl := gomock.NewController(t) + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) defer ctrl.Finish() - cdc, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) - - mockScopedKeeper := testkeeper.NewMockScopedKeeper(ctrl) - mockChannelKeeper := testkeeper.NewMockChannelKeeper(ctrl) - - consumerKeeper := testkeeper.GetCustomConsumerKeeperWithMocks( - cdc, - storeKey, - paramsSubspace, - mockScopedKeeper, - mockChannelKeeper, - testkeeper.NewMockPortKeeper(ctrl), - testkeeper.NewMockConnectionKeeper(ctrl), - testkeeper.NewMockClientKeeper(ctrl), - testkeeper.NewMockSlashingKeeper(ctrl), - testkeeper.NewMockBankKeeper(ctrl), - testkeeper.NewMockAccountKeeper(ctrl), - testkeeper.NewMockIBCTransferKeeper(ctrl), - testkeeper.NewMockIBCCoreKeeper(ctrl), - ) // Set channel to provider, still in context of consumer chain consumerKeeper.SetProviderChannel(ctx, consumerCCVChannelID) @@ -181,29 +160,13 @@ func TestOnAcknowledgementPacket(t *testing.T) { // Channel ID on destination (counter party) chain channelIDOnDest := "ChannelIDOnDest" - // Instantiate custom keeper with mocks + // Instantiate in-mem keeper with mocks ctrl := gomock.NewController(t) defer ctrl.Finish() - cdc, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) - - mockScopedKeeper := testkeeper.NewMockScopedKeeper(ctrl) - mockChannelKeeper := testkeeper.NewMockChannelKeeper(ctrl) - - consumerKeeper := testkeeper.GetCustomConsumerKeeperWithMocks( - cdc, - storeKey, - paramsSubspace, - mockScopedKeeper, - mockChannelKeeper, - testkeeper.NewMockPortKeeper(ctrl), - testkeeper.NewMockConnectionKeeper(ctrl), - testkeeper.NewMockClientKeeper(ctrl), - testkeeper.NewMockSlashingKeeper(ctrl), - testkeeper.NewMockBankKeeper(ctrl), - testkeeper.NewMockAccountKeeper(ctrl), - testkeeper.NewMockIBCTransferKeeper(ctrl), - testkeeper.NewMockIBCCoreKeeper(ctrl), - ) + keeperParams := testkeeper.NewInMemKeeperParams(t) + mocks := testkeeper.NewMockedKeepers(ctrl) + consumerKeeper := testkeeper.NewInMemConsumerKeeper(keeperParams, mocks) + ctx := keeperParams.Ctx // Set an established provider channel for later in test consumerKeeper.SetProviderChannel(ctx, channelIDToProvider) @@ -235,20 +198,20 @@ func TestOnAcknowledgementPacket(t *testing.T) { dummyCap := &capabilitytypes.Capability{} gomock.InOrder( - mockScopedKeeper.EXPECT().GetCapability( + mocks.MockScopedKeeper.EXPECT().GetCapability( ctx, host.ChannelCapabilityPath(ccv.ConsumerPortID, channelIDToDestChain), ).Return(dummyCap, true).Times(1), // Due to input error ack, ChanCloseInit is called on channel to destination chain - mockChannelKeeper.EXPECT().ChanCloseInit( + mocks.MockChannelKeeper.EXPECT().ChanCloseInit( ctx, ccv.ConsumerPortID, channelIDToDestChain, dummyCap, ).Return(nil).Times(1), - mockScopedKeeper.EXPECT().GetCapability( + mocks.MockScopedKeeper.EXPECT().GetCapability( ctx, host.ChannelCapabilityPath(ccv.ConsumerPortID, channelIDToProvider), ).Return(dummyCap, true).Times(1), // Due to input error ack and existence of established channel to provider, // ChanCloseInit is 
called on channel to provider - mockChannelKeeper.EXPECT().ChanCloseInit( + mocks.MockChannelKeeper.EXPECT().ChanCloseInit( ctx, ccv.ConsumerPortID, channelIDToProvider, dummyCap, ).Return(nil).Times(1), ) diff --git a/x/ccv/consumer/keeper/validators.go b/x/ccv/consumer/keeper/validators.go index ed8c54a58a..eeb94d5c93 100644 --- a/x/ccv/consumer/keeper/validators.go +++ b/x/ccv/consumer/keeper/validators.go @@ -202,3 +202,21 @@ func (k Keeper) TrackHistoricalInfo(ctx sdk.Context) { // Set latest HistoricalInfo at current height k.SetHistoricalInfo(ctx, ctx.BlockHeight(), &historicalEntry) } + +// ValidatorUpdates gets all cross-chain validators converted to the ABCI validator update type +func (k Keeper) GetValidatorUpdates(ctx sdk.Context) ([]abci.ValidatorUpdate, error) { + vals := k.GetAllCCValidator(ctx) + valUpdates := make([]abci.ValidatorUpdate, 0, len(vals)) + for _, v := range vals { + pk, err := v.ConsPubKey() + if err != nil { + return nil, err + } + tmPK, err := cryptocodec.ToTmProtoPublicKey(pk) + if err != nil { + return nil, err + } + valUpdates = append(valUpdates, abci.ValidatorUpdate{PubKey: tmPK, Power: v.Power}) + } + return valUpdates, nil +} diff --git a/x/ccv/consumer/keeper/validators_test.go b/x/ccv/consumer/keeper/validators_test.go index 15f14ed1b5..2b4f1484fe 100644 --- a/x/ccv/consumer/keeper/validators_test.go +++ b/x/ccv/consumer/keeper/validators_test.go @@ -3,8 +3,6 @@ package keeper_test import ( "testing" - "github.com/cosmos/cosmos-sdk/codec" - codectypes "github.com/cosmos/cosmos-sdk/codec/types" cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" sdk "github.com/cosmos/cosmos-sdk/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" @@ -19,19 +17,12 @@ import ( // TestApplyCCValidatorChanges tests the ApplyCCValidatorChanges method for a consumer keeper func TestApplyCCValidatorChanges(t *testing.T) { - // Construct a keeper with a custom codec - _, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) - ir := codectypes.NewInterfaceRegistry() - // Public key implementation must be registered - cryptocodec.RegisterInterfaces(ir) - cdc := codec.NewProtoCodec(ir) - - consumerKeeper := testkeeper.GetCustomConsumerKeeper( - cdc, - storeKey, - paramsSubspace, - ) + keeperParams := testkeeper.NewInMemKeeperParams(t) + // Explicitly register cdc with public key interface + keeperParams.RegisterSdkCryptoCodecInterfaces() + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() // utility functions getCCVals := func() (vals []types.CrossChainValidator) { @@ -116,20 +107,11 @@ func TestApplyCCValidatorChanges(t *testing.T) { // Tests the getter and setter behavior for historical info func TestHistoricalInfo(t *testing.T) { - // Construct a keeper with a custom codec - _, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) - ir := codectypes.NewInterfaceRegistry() - - // Public key implementation must be registered - cryptocodec.RegisterInterfaces(ir) - cdc := codec.NewProtoCodec(ir) - - consumerKeeper := testkeeper.GetCustomConsumerKeeper( - cdc, - storeKey, - paramsSubspace, - ) - + keeperParams := testkeeper.NewInMemKeeperParams(t) + // Explicitly register cdc with public key interface + keeperParams.RegisterSdkCryptoCodecInterfaces() + consumerKeeper, ctx, ctrl, _ := testkeeper.GetConsumerKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() ctx = ctx.WithBlockHeight(15) // Generate test validators, save them to store, and retrieve stored records diff --git 
a/x/ccv/consumer/module.go b/x/ccv/consumer/module.go index bb6a9d7cbf..3c8080d5c1 100644 --- a/x/ccv/consumer/module.go +++ b/x/ccv/consumer/module.go @@ -150,9 +150,6 @@ func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { // the CCV channel was established, but it was then closed; // the consumer chain is no longer safe - // cleanup state - am.keeper.DeleteProviderChannel(ctx) - channelClosedMsg := fmt.Sprintf("CCV channel %q was closed - shutdown consumer chain since it is not secured anymore", channelID) ctx.Logger().Error(channelClosedMsg) panic(channelClosedMsg) diff --git a/x/ccv/consumer/types/codec.go b/x/ccv/consumer/types/codec.go new file mode 100644 index 0000000000..e90cf8a7f9 --- /dev/null +++ b/x/ccv/consumer/types/codec.go @@ -0,0 +1,10 @@ +package types + +// // RegisterInterfaces register the ibc transfer module interfaces to protobuf +// // Any. +// func RegisterInterfaces(registry codectypes.InterfaceRegistry) { +// registry.RegisterImplementations( +// (*govtypes.Content)(nil), +// &ConsumerAdditionProposal{}, +// ) +// } diff --git a/x/ccv/consumer/types/consumer.pb.go b/x/ccv/consumer/types/consumer.pb.go index df4994c932..f6a55d06c0 100644 --- a/x/ccv/consumer/types/consumer.pb.go +++ b/x/ccv/consumer/types/consumer.pb.go @@ -29,6 +29,8 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // Params defines the parameters for CCV consumer module type Params struct { + // TODO: Remove enabled flag and find a better way to setup e2e tests + // See: https://github.com/cosmos/interchain-security/issues/339 Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` /////////////////////// // Distribution Params @@ -266,11 +268,57 @@ func (m *SlashRequest) GetInfraction() types2.InfractionType { return types2.InfractionEmpty } +// SlashRequests is a list of slash requests for CCV consumer module +type SlashRequests struct { + Requests []SlashRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests"` +} + +func (m *SlashRequests) Reset() { *m = SlashRequests{} } +func (m *SlashRequests) String() string { return proto.CompactTextString(m) } +func (*SlashRequests) ProtoMessage() {} +func (*SlashRequests) Descriptor() ([]byte, []int) { + return fileDescriptor_5b27a82b276e7f93, []int{4} +} +func (m *SlashRequests) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SlashRequests) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SlashRequests.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SlashRequests) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlashRequests.Merge(m, src) +} +func (m *SlashRequests) XXX_Size() int { + return m.Size() +} +func (m *SlashRequests) XXX_DiscardUnknown() { + xxx_messageInfo_SlashRequests.DiscardUnknown(m) +} + +var xxx_messageInfo_SlashRequests proto.InternalMessageInfo + +func (m *SlashRequests) GetRequests() []SlashRequest { + if m != nil { + return m.Requests + } + return nil +} + func init() { proto.RegisterType((*Params)(nil), "interchain_security.ccv.consumer.v1.Params") proto.RegisterType((*LastTransmissionBlockHeight)(nil), "interchain_security.ccv.consumer.v1.LastTransmissionBlockHeight") proto.RegisterType((*CrossChainValidator)(nil), "interchain_security.ccv.consumer.v1.CrossChainValidator") proto.RegisterType((*SlashRequest)(nil), 
"interchain_security.ccv.consumer.v1.SlashRequest") + proto.RegisterType((*SlashRequests)(nil), "interchain_security.ccv.consumer.v1.SlashRequests") } func init() { @@ -278,44 +326,46 @@ func init() { } var fileDescriptor_5b27a82b276e7f93 = []byte{ - // 578 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xad, 0xbf, 0x7e, 0x04, 0x98, 0x56, 0x2c, 0x4c, 0x54, 0x42, 0x91, 0xdc, 0xd6, 0x54, 0xa8, - 0x12, 0xea, 0x58, 0x49, 0xc5, 0x26, 0xbb, 0x26, 0x55, 0xc5, 0x9f, 0x44, 0xe4, 0x46, 0x2c, 0xd8, - 0x58, 0xe3, 0xf1, 0x8d, 0x3d, 0x8a, 0x3d, 0x63, 0x66, 0xc6, 0x06, 0xbf, 0x05, 0x7b, 0x5e, 0x80, - 0x07, 0xe0, 0x21, 0x10, 0xab, 0x2e, 0x59, 0x21, 0x94, 0xbc, 0x01, 0x5b, 0x36, 0xc8, 0x7f, 0x21, - 0x48, 0x64, 0x77, 0x8f, 0xee, 0x39, 0xc7, 0xd7, 0xf7, 0x9e, 0x41, 0x03, 0xc6, 0x35, 0x48, 0x1a, - 0x11, 0xc6, 0x3d, 0x05, 0x34, 0x93, 0x4c, 0x17, 0x0e, 0xa5, 0xb9, 0x43, 0x05, 0x57, 0x59, 0x02, - 0xd2, 0xc9, 0xfb, 0xab, 0x1a, 0xa7, 0x52, 0x68, 0x61, 0x3e, 0xfc, 0x87, 0x06, 0x53, 0x9a, 0xe3, - 0x15, 0x2f, 0xef, 0xef, 0x1f, 0x6f, 0x32, 0x2e, 0xfd, 0x68, 0x5e, 0x5b, 0xed, 0xdf, 0x0f, 0x85, - 0x08, 0x63, 0x70, 0x2a, 0xe4, 0x67, 0x33, 0x87, 0xf0, 0xa2, 0x69, 0x1d, 0x53, 0xa1, 0x12, 0xa1, - 0x1c, 0xa5, 0xc9, 0x9c, 0xf1, 0xd0, 0xc9, 0xfb, 0x3e, 0x68, 0xd2, 0x6f, 0x71, 0xc3, 0xea, 0x86, - 0x22, 0x14, 0x55, 0xe9, 0x94, 0x55, 0x6b, 0x5b, 0x6b, 0xbd, 0xba, 0x51, 0x83, 0xba, 0x65, 0xff, - 0x32, 0x50, 0x67, 0x42, 0x24, 0x49, 0x94, 0xd9, 0x43, 0x37, 0x81, 0x13, 0x3f, 0x86, 0xa0, 0x67, - 0x1c, 0x1a, 0x27, 0xb7, 0xdc, 0x16, 0x9a, 0xaf, 0xd0, 0xb1, 0x1f, 0x0b, 0x3a, 0x57, 0x5e, 0x0a, - 0xd2, 0x0b, 0x98, 0xd2, 0x92, 0xf9, 0x99, 0x66, 0x82, 0x7b, 0x5a, 0x12, 0xae, 0x12, 0xa6, 0x14, - 0x13, 0xbc, 0xf7, 0xdf, 0xa1, 0x71, 0xb2, 0xed, 0x1e, 0xd5, 0xdc, 0x09, 0xc8, 0x8b, 0x35, 0xe6, - 0x74, 0x8d, 0x68, 0x3e, 0x47, 0x47, 0x1b, 0x5d, 0x3c, 0x1a, 0x11, 0xce, 0x21, 0xee, 0x6d, 0x1f, - 0x1a, 0x27, 0xb7, 0xdd, 0x83, 0x60, 0x83, 0xc9, 0xb8, 0xa6, 0x99, 0x43, 0xb4, 0x9f, 0x4a, 0x91, - 0xb3, 0x00, 0xa4, 0x37, 0x03, 0xf0, 0x52, 0x21, 0x62, 0x8f, 0x04, 0x81, 0xf4, 0x94, 0x96, 0xbd, - 0xff, 0x2b, 0x93, 0xbd, 0x96, 0x71, 0x09, 0x30, 0x11, 0x22, 0x3e, 0x0f, 0x02, 0x79, 0xa5, 0xa5, - 0xfd, 0x04, 0x3d, 0x78, 0x49, 0x94, 0x5e, 0xb7, 0x1d, 0x95, 0xc3, 0x3f, 0x05, 0x16, 0x46, 0xda, - 0xdc, 0x43, 0x9d, 0xa8, 0xaa, 0xaa, 0x85, 0x6c, 0xbb, 0x0d, 0xb2, 0x3f, 0x19, 0xe8, 0xee, 0x58, - 0x0a, 0xa5, 0xc6, 0xe5, 0x3d, 0x5f, 0x93, 0x98, 0x05, 0x44, 0x0b, 0x59, 0x6e, 0xb0, 0xfc, 0x30, - 0x28, 0x55, 0x09, 0x76, 0xdd, 0x16, 0x9a, 0x5d, 0x74, 0x23, 0x15, 0xef, 0x40, 0x36, 0x2b, 0xaa, - 0x81, 0x49, 0x50, 0x27, 0xcd, 0xfc, 0x39, 0x14, 0xd5, 0xbf, 0xee, 0x0c, 0xba, 0xb8, 0xbe, 0x3f, - 0x6e, 0xef, 0x8f, 0xcf, 0x79, 0x31, 0x3a, 0xfb, 0xf9, 0xfd, 0xe0, 0x5e, 0x41, 0x92, 0x78, 0x68, - 0x97, 0x89, 0x02, 0xae, 0x32, 0xe5, 0xd5, 0x3a, 0xfb, 0xeb, 0xe7, 0xd3, 0x6e, 0x73, 0x4f, 0x2a, - 0x8b, 0x54, 0x0b, 0x3c, 0xc9, 0xfc, 0x17, 0x50, 0xb8, 0x8d, 0xb1, 0xfd, 0xd1, 0x40, 0xbb, 0x57, - 0x31, 0x51, 0x91, 0x0b, 0x6f, 0x33, 0x50, 0xda, 0x1c, 0xa3, 0x4e, 0x4a, 0xe8, 0x1c, 0xea, 0x7f, - 0xda, 0x19, 0x3c, 0xc6, 0x9b, 0xe2, 0x9b, 0xf7, 0x71, 0xa5, 0x9c, 0x54, 0xf4, 0x0b, 0xa2, 0x89, - 0xdb, 0x48, 0xcd, 0x4b, 0x84, 0x18, 0x9f, 0x49, 0x42, 0x75, 0x7b, 0xf6, 0x3b, 0x83, 0x47, 0xb8, - 0x19, 0xa4, 0x4d, 0x64, 0x93, 0x50, 0xfc, 0x6c, 0xc5, 0x9c, 0x16, 0x29, 0xb8, 0x6b, 0xca, 0xd1, - 0xf4, 0xcb, 0xc2, 0x32, 0xae, 0x17, 0x96, 0xf1, 0x63, 0x61, 0x19, 0x1f, 0x96, 0xd6, 0xd6, 0xf5, - 0xd2, 0xda, 0xfa, 
0xb6, 0xb4, 0xb6, 0xde, 0x0c, 0x43, 0xa6, 0xa3, 0xcc, 0xc7, 0x54, 0x24, 0x4d, - 0x60, 0x9d, 0x3f, 0x73, 0x9e, 0xae, 0x5e, 0xd0, 0xfb, 0xbf, 0x1f, 0xa7, 0x2e, 0x52, 0x50, 0x7e, - 0xa7, 0x5a, 0xdf, 0xd9, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0x85, 0x02, 0x02, 0xcd, 0x03, - 0x00, 0x00, + // 614 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0x8d, 0xbf, 0xf4, 0x0b, 0x65, 0x5a, 0x58, 0x98, 0xa8, 0x84, 0x22, 0xa5, 0xa9, 0xa9, 0x50, + 0x24, 0xd4, 0xb1, 0x92, 0x8a, 0x4d, 0x77, 0x4d, 0xaa, 0x8a, 0x3f, 0x89, 0xc8, 0xad, 0x58, 0xb0, + 0xb1, 0xc6, 0xe3, 0x5b, 0x67, 0x14, 0x7b, 0xc6, 0xcc, 0x8c, 0x0d, 0x7e, 0x0b, 0xf6, 0xbc, 0x00, + 0x0f, 0xc0, 0x43, 0x54, 0xac, 0xba, 0x64, 0x55, 0xa1, 0xf6, 0x0d, 0xd8, 0xb2, 0x41, 0xfe, 0x0b, + 0x46, 0x22, 0x12, 0xbb, 0x7b, 0x34, 0xe7, 0x1c, 0xcf, 0xdc, 0x73, 0xaf, 0xd1, 0x98, 0x71, 0x0d, + 0x92, 0xce, 0x09, 0xe3, 0xae, 0x02, 0x9a, 0x48, 0xa6, 0x33, 0x9b, 0xd2, 0xd4, 0xa6, 0x82, 0xab, + 0x24, 0x02, 0x69, 0xa7, 0xa3, 0x65, 0x8d, 0x63, 0x29, 0xb4, 0x30, 0x1f, 0xfd, 0x45, 0x83, 0x29, + 0x4d, 0xf1, 0x92, 0x97, 0x8e, 0xb6, 0xf7, 0x56, 0x19, 0xe7, 0x7e, 0x34, 0x2d, 0xad, 0xb6, 0x1f, + 0x04, 0x42, 0x04, 0x21, 0xd8, 0x05, 0xf2, 0x92, 0x73, 0x9b, 0xf0, 0xac, 0x3a, 0xda, 0xa3, 0x42, + 0x45, 0x42, 0xd9, 0x4a, 0x93, 0x05, 0xe3, 0x81, 0x9d, 0x8e, 0x3c, 0xd0, 0x64, 0x54, 0xe3, 0x8a, + 0xd5, 0x0d, 0x44, 0x20, 0x8a, 0xd2, 0xce, 0xab, 0xda, 0xb6, 0xd4, 0xba, 0xe5, 0x41, 0x09, 0xca, + 0x23, 0xeb, 0xa7, 0x81, 0x3a, 0x33, 0x22, 0x49, 0xa4, 0xcc, 0x1e, 0xba, 0x05, 0x9c, 0x78, 0x21, + 0xf8, 0x3d, 0x63, 0x60, 0x0c, 0xd7, 0x9d, 0x1a, 0x9a, 0xaf, 0xd1, 0x9e, 0x17, 0x0a, 0xba, 0x50, + 0x6e, 0x0c, 0xd2, 0xf5, 0x99, 0xd2, 0x92, 0x79, 0x89, 0x66, 0x82, 0xbb, 0x5a, 0x12, 0xae, 0x22, + 0xa6, 0x14, 0x13, 0xbc, 0xf7, 0xdf, 0xc0, 0x18, 0xb6, 0x9d, 0xdd, 0x92, 0x3b, 0x03, 0x79, 0xdc, + 0x60, 0x9e, 0x35, 0x88, 0xe6, 0x0b, 0xb4, 0xbb, 0xd2, 0xc5, 0xa5, 0x73, 0xc2, 0x39, 0x84, 0xbd, + 0xf6, 0xc0, 0x18, 0xde, 0x76, 0x76, 0xfc, 0x15, 0x26, 0xd3, 0x92, 0x66, 0x1e, 0xa2, 0xed, 0x58, + 0x8a, 0x94, 0xf9, 0x20, 0xdd, 0x73, 0x00, 0x37, 0x16, 0x22, 0x74, 0x89, 0xef, 0x4b, 0x57, 0x69, + 0xd9, 0x5b, 0x2b, 0x4c, 0xb6, 0x6a, 0xc6, 0x09, 0xc0, 0x4c, 0x88, 0xf0, 0xc8, 0xf7, 0xe5, 0xa9, + 0x96, 0xd6, 0x53, 0xf4, 0xf0, 0x15, 0x51, 0xba, 0x69, 0x3b, 0xc9, 0x2f, 0xff, 0x0c, 0x58, 0x30, + 0xd7, 0xe6, 0x16, 0xea, 0xcc, 0x8b, 0xaa, 0x68, 0x48, 0xdb, 0xa9, 0x90, 0xf5, 0xd9, 0x40, 0xf7, + 0xa6, 0x52, 0x28, 0x35, 0xcd, 0xf3, 0x7c, 0x43, 0x42, 0xe6, 0x13, 0x2d, 0x64, 0xde, 0xc1, 0xfc, + 0xc3, 0xa0, 0x54, 0x21, 0xd8, 0x74, 0x6a, 0x68, 0x76, 0xd1, 0xff, 0xb1, 0x78, 0x0f, 0xb2, 0x6a, + 0x51, 0x09, 0x4c, 0x82, 0x3a, 0x71, 0xe2, 0x2d, 0x20, 0x2b, 0xde, 0xba, 0x31, 0xee, 0xe2, 0x32, + 0x7f, 0x5c, 0xe7, 0x8f, 0x8f, 0x78, 0x36, 0x39, 0xf8, 0x71, 0xb5, 0x73, 0x3f, 0x23, 0x51, 0x78, + 0x68, 0xe5, 0x13, 0x05, 0x5c, 0x25, 0xca, 0x2d, 0x75, 0xd6, 0xd7, 0x2f, 0xfb, 0xdd, 0x2a, 0x4f, + 0x2a, 0xb3, 0x58, 0x0b, 0x3c, 0x4b, 0xbc, 0x97, 0x90, 0x39, 0x95, 0xb1, 0xf5, 0xc9, 0x40, 0x9b, + 0xa7, 0x21, 0x51, 0x73, 0x07, 0xde, 0x25, 0xa0, 0xb4, 0x39, 0x45, 0x9d, 0x98, 0xd0, 0x05, 0x94, + 0x6f, 0xda, 0x18, 0x3f, 0xc1, 0xab, 0xc6, 0x37, 0x1d, 0xe1, 0x42, 0x39, 0x2b, 0xe8, 0xc7, 0x44, + 0x13, 0xa7, 0x92, 0x9a, 0x27, 0x08, 0x31, 0x7e, 0x2e, 0x09, 0xd5, 0x75, 0xec, 0x77, 0xc7, 0x8f, + 0x71, 0x75, 0x91, 0x7a, 0x22, 0xab, 0x09, 0xc5, 0xcf, 0x97, 0xcc, 0xb3, 0x2c, 0x06, 0xa7, 0xa1, + 0xb4, 0x7c, 0x74, 0xa7, 0x79, 0x39, 0x65, 0x9e, 0xa2, 0x75, 0x59, 0xd5, 0x3d, 
0x63, 0xd0, 0x1e, + 0x6e, 0x8c, 0x47, 0xf8, 0x1f, 0xd6, 0x0b, 0x37, 0x5d, 0x26, 0x6b, 0x17, 0x57, 0x3b, 0x2d, 0x67, + 0x69, 0x34, 0x39, 0xbb, 0xb8, 0xee, 0x1b, 0x97, 0xd7, 0x7d, 0xe3, 0xfb, 0x75, 0xdf, 0xf8, 0x78, + 0xd3, 0x6f, 0x5d, 0xde, 0xf4, 0x5b, 0xdf, 0x6e, 0xfa, 0xad, 0xb7, 0x87, 0x01, 0xd3, 0xf3, 0xc4, + 0xc3, 0x54, 0x44, 0xd5, 0x5a, 0xd8, 0xbf, 0xbf, 0xb6, 0xbf, 0xdc, 0xd3, 0x0f, 0x7f, 0xfe, 0x02, + 0x74, 0x16, 0x83, 0xf2, 0x3a, 0x45, 0x48, 0x07, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0x91, 0xef, + 0xc0, 0x3c, 0x33, 0x04, 0x00, 0x00, } func (m *Params) Marshal() (dAtA []byte, err error) { @@ -485,6 +535,43 @@ func (m *SlashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SlashRequests) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlashRequests) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SlashRequests) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + for iNdEx := len(m.Requests) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Requests[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintConsumer(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintConsumer(dAtA []byte, offset int, v uint64) int { offset -= sovConsumer(v) base := offset @@ -567,6 +654,21 @@ func (m *SlashRequest) Size() (n int) { return n } +func (m *SlashRequests) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Requests) > 0 { + for _, e := range m.Requests { + l = e.Size() + n += 1 + l + sovConsumer(uint64(l)) + } + } + return n +} + func sovConsumer(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1039,6 +1141,90 @@ func (m *SlashRequest) Unmarshal(dAtA []byte) error { } return nil } +func (m *SlashRequests) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConsumer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SlashRequests: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SlashRequests: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowConsumer + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthConsumer + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthConsumer + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Requests = append(m.Requests, SlashRequest{}) + if err := m.Requests[len(m.Requests)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex 
+ skippy, err := skipConsumer(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthConsumer + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipConsumer(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/ccv/consumer/types/genesis.go b/x/ccv/consumer/types/genesis.go index a114d0bd17..0e2a0b2557 100644 --- a/x/ccv/consumer/types/genesis.go +++ b/x/ccv/consumer/types/genesis.go @@ -12,9 +12,8 @@ import ( ) // NewInitialGenesisState returns a consumer GenesisState for a completely new consumer chain. -// TODO: Include chain status func NewInitialGenesisState(cs *ibctmtypes.ClientState, consState *ibctmtypes.ConsensusState, - initValSet []abci.ValidatorUpdate, params Params) *GenesisState { + initValSet []abci.ValidatorUpdate, slashRequests SlashRequests, params Params) *GenesisState { return &GenesisState{ Params: params, @@ -22,21 +21,28 @@ func NewInitialGenesisState(cs *ibctmtypes.ClientState, consState *ibctmtypes.Co ProviderClientState: cs, ProviderConsensusState: consState, InitialValSet: initValSet, + PendingSlashRequests: slashRequests, } } // NewRestartGenesisState returns a consumer GenesisState that has already been established. func NewRestartGenesisState(clientID, channelID string, maturingPackets []MaturingVSCPacket, - initValSet []abci.ValidatorUpdate, params Params) *GenesisState { + initValSet []abci.ValidatorUpdate, + heightToValsetUpdateIDs []HeightToValsetUpdateID, + outstandingDowntimes []OutstandingDowntime, + params Params, +) *GenesisState { return &GenesisState{ - Params: params, - ProviderClientId: clientID, - ProviderChannelId: channelID, - MaturingPackets: maturingPackets, - NewChain: false, - InitialValSet: initValSet, + Params: params, + ProviderClientId: clientID, + ProviderChannelId: channelID, + MaturingPackets: maturingPackets, + NewChain: false, + InitialValSet: initValSet, + HeightToValsetUpdateId: heightToValsetUpdateIDs, + OutstandingDowntimeSlashing: outstandingDowntimes, } } diff --git a/x/ccv/consumer/types/genesis.pb.go b/x/ccv/consumer/types/genesis.pb.go index 19ffb21d47..aa66509507 100644 --- a/x/ccv/consumer/types/genesis.pb.go +++ b/x/ccv/consumer/types/genesis.pb.go @@ -35,9 +35,17 @@ type GenesisState struct { // ProviderClientState filled in on new chain, nil on restart. ProviderClientState *types.ClientState `protobuf:"bytes,5,opt,name=provider_client_state,json=providerClientState,proto3" json:"provider_client_state,omitempty"` // ProviderConsensusState filled in on new chain, nil on restart. - ProviderConsensusState *types.ConsensusState `protobuf:"bytes,6,opt,name=provider_consensus_state,json=providerConsensusState,proto3" json:"provider_consensus_state,omitempty"` - MaturingPackets []MaturingVSCPacket `protobuf:"bytes,7,rep,name=maturing_packets,json=maturingPackets,proto3" json:"maturing_packets"` - InitialValSet []types1.ValidatorUpdate `protobuf:"bytes,8,rep,name=initial_val_set,json=initialValSet,proto3" json:"initial_val_set"` + ProviderConsensusState *types.ConsensusState `protobuf:"bytes,6,opt,name=provider_consensus_state,json=providerConsensusState,proto3" json:"provider_consensus_state,omitempty"` + // MaturingPackets nil on new chain, filled on restart. 
+ MaturingPackets []MaturingVSCPacket `protobuf:"bytes,7,rep,name=maturing_packets,json=maturingPackets,proto3" json:"maturing_packets"` + // InitialValset filled in on new chain, manually filled in on restart. + InitialValSet []types1.ValidatorUpdate `protobuf:"bytes,8,rep,name=initial_val_set,json=initialValSet,proto3" json:"initial_val_set"` + // HeightToValsetUpdateId nil on new chain, filled on restart. + HeightToValsetUpdateId []HeightToValsetUpdateID `protobuf:"bytes,9,rep,name=height_to_valset_update_id,json=heightToValsetUpdateId,proto3" json:"height_to_valset_update_id"` + // OutstandingDowntimes nil on new chain, filled on restart. + OutstandingDowntimeSlashing []OutstandingDowntime `protobuf:"bytes,10,rep,name=outstanding_downtime_slashing,json=outstandingDowntimeSlashing,proto3" json:"outstanding_downtime_slashing"` + // PendingSlashRequests filled in on new chain, nil on restart. + PendingSlashRequests SlashRequests `protobuf:"bytes,11,opt,name=pending_slash_requests,json=pendingSlashRequests,proto3" json:"pending_slash_requests"` } func (m *GenesisState) Reset() { *m = GenesisState{} } @@ -129,7 +137,29 @@ func (m *GenesisState) GetInitialValSet() []types1.ValidatorUpdate { return nil } -// UnbondingSequence defines the genesis information for each unbonding packet sequence. +func (m *GenesisState) GetHeightToValsetUpdateId() []HeightToValsetUpdateID { + if m != nil { + return m.HeightToValsetUpdateId + } + return nil +} + +func (m *GenesisState) GetOutstandingDowntimeSlashing() []OutstandingDowntime { + if m != nil { + return m.OutstandingDowntimeSlashing + } + return nil +} + +func (m *GenesisState) GetPendingSlashRequests() SlashRequests { + if m != nil { + return m.PendingSlashRequests + } + return SlashRequests{} +} + +// UnbondingSequence defines the genesis information for each unbonding packet +// sequence. 
type MaturingVSCPacket struct { VscId uint64 `protobuf:"varint,1,opt,name=vscId,proto3" json:"vscId,omitempty"` MaturityTime uint64 `protobuf:"varint,2,opt,name=maturity_time,json=maturityTime,proto3" json:"maturity_time,omitempty"` @@ -182,9 +212,111 @@ func (m *MaturingVSCPacket) GetMaturityTime() uint64 { return 0 } +// HeightValsetUpdateID defines the genesis information for the mapping +// of each block height to a valset update id +type HeightToValsetUpdateID struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + ValsetUpdateId uint64 `protobuf:"varint,2,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` +} + +func (m *HeightToValsetUpdateID) Reset() { *m = HeightToValsetUpdateID{} } +func (m *HeightToValsetUpdateID) String() string { return proto.CompactTextString(m) } +func (*HeightToValsetUpdateID) ProtoMessage() {} +func (*HeightToValsetUpdateID) Descriptor() ([]byte, []int) { + return fileDescriptor_2db73a6057a27482, []int{2} +} +func (m *HeightToValsetUpdateID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HeightToValsetUpdateID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_HeightToValsetUpdateID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *HeightToValsetUpdateID) XXX_Merge(src proto.Message) { + xxx_messageInfo_HeightToValsetUpdateID.Merge(m, src) +} +func (m *HeightToValsetUpdateID) XXX_Size() int { + return m.Size() +} +func (m *HeightToValsetUpdateID) XXX_DiscardUnknown() { + xxx_messageInfo_HeightToValsetUpdateID.DiscardUnknown(m) +} + +var xxx_messageInfo_HeightToValsetUpdateID proto.InternalMessageInfo + +func (m *HeightToValsetUpdateID) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *HeightToValsetUpdateID) GetValsetUpdateId() uint64 { + if m != nil { + return m.ValsetUpdateId + } + return 0 +} + +// OutstandingDowntime defines the genesis information for each validator +// flagged with an outstanding downtime slashing. 
+type OutstandingDowntime struct { + ValidatorConsensusAddress string `protobuf:"bytes,1,opt,name=validator_consensus_address,json=validatorConsensusAddress,proto3" json:"validator_consensus_address,omitempty"` +} + +func (m *OutstandingDowntime) Reset() { *m = OutstandingDowntime{} } +func (m *OutstandingDowntime) String() string { return proto.CompactTextString(m) } +func (*OutstandingDowntime) ProtoMessage() {} +func (*OutstandingDowntime) Descriptor() ([]byte, []int) { + return fileDescriptor_2db73a6057a27482, []int{3} +} +func (m *OutstandingDowntime) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OutstandingDowntime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OutstandingDowntime.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OutstandingDowntime) XXX_Merge(src proto.Message) { + xxx_messageInfo_OutstandingDowntime.Merge(m, src) +} +func (m *OutstandingDowntime) XXX_Size() int { + return m.Size() +} +func (m *OutstandingDowntime) XXX_DiscardUnknown() { + xxx_messageInfo_OutstandingDowntime.DiscardUnknown(m) +} + +var xxx_messageInfo_OutstandingDowntime proto.InternalMessageInfo + +func (m *OutstandingDowntime) GetValidatorConsensusAddress() string { + if m != nil { + return m.ValidatorConsensusAddress + } + return "" +} + func init() { proto.RegisterType((*GenesisState)(nil), "interchain_security.ccv.consumer.v1.GenesisState") proto.RegisterType((*MaturingVSCPacket)(nil), "interchain_security.ccv.consumer.v1.MaturingVSCPacket") + proto.RegisterType((*HeightToValsetUpdateID)(nil), "interchain_security.ccv.consumer.v1.HeightToValsetUpdateID") + proto.RegisterType((*OutstandingDowntime)(nil), "interchain_security.ccv.consumer.v1.OutstandingDowntime") } func init() { @@ -192,41 +324,53 @@ func init() { } var fileDescriptor_2db73a6057a27482 = []byte{ - // 543 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcf, 0x6f, 0xd3, 0x30, - 0x14, 0x6e, 0x58, 0x37, 0x36, 0x6f, 0xd3, 0xb6, 0x6c, 0xa0, 0x68, 0x93, 0x42, 0xd9, 0x2e, 0x95, - 0x00, 0x5b, 0x2d, 0x12, 0x07, 0x8e, 0xeb, 0x01, 0xf5, 0xc0, 0x34, 0xa5, 0xa3, 0x07, 0x2e, 0x91, - 0xeb, 0x3c, 0xa5, 0x16, 0x89, 0x1d, 0xc5, 0x6e, 0x46, 0xff, 0x0b, 0xfe, 0xac, 0x1d, 0x77, 0xdc, - 0x09, 0xa1, 0xf6, 0x1f, 0x41, 0xb1, 0x93, 0xfe, 0x00, 0x24, 0x7a, 0xb3, 0x9f, 0xdf, 0xf7, 0xbe, - 0xef, 0x7d, 0xcf, 0x0f, 0x75, 0xb8, 0xd0, 0x90, 0xb3, 0x31, 0xe5, 0x22, 0x54, 0xc0, 0x26, 0x39, - 0xd7, 0x53, 0xc2, 0x58, 0x41, 0x98, 0x14, 0x6a, 0x92, 0x42, 0x4e, 0x8a, 0x0e, 0x89, 0x41, 0x80, - 0xe2, 0x0a, 0x67, 0xb9, 0xd4, 0xd2, 0xbd, 0xfa, 0x07, 0x04, 0x33, 0x56, 0xe0, 0x1a, 0x82, 0x8b, - 0xce, 0x39, 0xe1, 0x23, 0x46, 0x12, 0x1e, 0x8f, 0x35, 0x4b, 0x38, 0x08, 0xad, 0x88, 0x06, 0x11, - 0x41, 0x9e, 0x72, 0xa1, 0xcb, 0x92, 0xcb, 0x9b, 0xad, 0x7a, 0xfe, 0xba, 0x04, 0x30, 0x99, 0x03, - 0x61, 0x63, 0x2a, 0x04, 0x24, 0x65, 0x56, 0x75, 0xac, 0x52, 0xce, 0x62, 0x19, 0x4b, 0x73, 0x24, - 0xe5, 0xa9, 0x8a, 0x76, 0x37, 0xe9, 0x60, 0x21, 0xcd, 0x62, 0x2e, 0x56, 0xc4, 0xd0, 0x11, 0xe3, - 0x44, 0x4f, 0x33, 0xa8, 0xfa, 0xbb, 0x7c, 0x6a, 0xa2, 0x83, 0x4f, 0xb6, 0xe3, 0x81, 0xa6, 0x1a, - 0xdc, 0x3e, 0xda, 0xc9, 0x68, 0x4e, 0x53, 0xe5, 0x39, 0x2d, 0xa7, 0xbd, 0xdf, 0x7d, 0x83, 0x37, - 0x70, 0x00, 0xdf, 0x1a, 0xc8, 0x75, 0xf3, 0xe1, 0xe7, 0xab, 0x46, 0x50, 0x15, 0x70, 0xdf, 0x22, - 0x37, 0xcb, 0x65, 0xc1, 0x23, 0xc8, 0x43, 
0x6b, 0x4c, 0xc8, 0x23, 0xef, 0x59, 0xcb, 0x69, 0xef, - 0x05, 0xc7, 0xf5, 0x4b, 0xcf, 0x3c, 0xf4, 0x23, 0x17, 0xa3, 0xd3, 0x65, 0xb6, 0xb5, 0xa2, 0x4c, - 0xdf, 0x32, 0xe9, 0x27, 0x8b, 0x74, 0xfb, 0xd2, 0x8f, 0xdc, 0x0b, 0xb4, 0x27, 0xe0, 0x3e, 0x34, - 0xc2, 0xbc, 0x66, 0xcb, 0x69, 0xef, 0x06, 0xbb, 0x02, 0xee, 0x7b, 0xe5, 0xdd, 0x0d, 0xd1, 0x8b, - 0x3f, 0xa9, 0x55, 0xd9, 0x9e, 0xb7, 0x5d, 0x37, 0x35, 0x62, 0x78, 0x75, 0x62, 0x78, 0x65, 0x46, - 0x45, 0x07, 0x5b, 0x55, 0xc6, 0x91, 0xe0, 0x74, 0x5d, 0xaa, 0xb5, 0x69, 0x8c, 0xbc, 0x25, 0x81, - 0x14, 0x0a, 0x84, 0x9a, 0xa8, 0x8a, 0x63, 0xc7, 0x70, 0xe0, 0xff, 0x72, 0xd4, 0x30, 0x4b, 0xf3, - 0x72, 0x41, 0xb3, 0x16, 0x77, 0x63, 0x74, 0x9c, 0x52, 0x3d, 0xc9, 0xb9, 0x88, 0xc3, 0x8c, 0xb2, - 0x6f, 0xa0, 0x95, 0xf7, 0xbc, 0xb5, 0xd5, 0xde, 0xef, 0x7e, 0xd8, 0x68, 0x34, 0x9f, 0x2b, 0xf0, - 0x70, 0xd0, 0xbb, 0x35, 0xf0, 0x6a, 0x4a, 0x47, 0x75, 0x55, 0x1b, 0x55, 0xee, 0x0d, 0x3a, 0xe2, - 0x82, 0x6b, 0x4e, 0x93, 0xb0, 0xa0, 0x49, 0xa8, 0x40, 0x7b, 0xbb, 0x86, 0xa7, 0xb5, 0x2a, 0xbc, - 0xfc, 0x41, 0x78, 0x48, 0x13, 0x1e, 0x51, 0x2d, 0xf3, 0x2f, 0x59, 0x44, 0x35, 0x54, 0x15, 0x0f, - 0x2b, 0xf8, 0x90, 0x26, 0x03, 0xd0, 0x97, 0x37, 0xe8, 0xe4, 0x2f, 0x6e, 0xf7, 0x0c, 0x6d, 0x17, - 0x8a, 0xf5, 0x23, 0xf3, 0xbb, 0x9a, 0x81, 0xbd, 0xb8, 0x57, 0xe8, 0xd0, 0xaa, 0xd1, 0xd3, 0x50, - 0xf3, 0x14, 0xcc, 0x27, 0x69, 0x06, 0x07, 0x75, 0xf0, 0x8e, 0xa7, 0x70, 0x7d, 0xf7, 0x30, 0xf3, - 0x9d, 0xc7, 0x99, 0xef, 0xfc, 0x9a, 0xf9, 0xce, 0x8f, 0xb9, 0xdf, 0x78, 0x9c, 0xfb, 0x8d, 0xa7, - 0xb9, 0xdf, 0xf8, 0xfa, 0x31, 0xe6, 0x7a, 0x3c, 0x19, 0x61, 0x26, 0x53, 0xc2, 0xa4, 0x4a, 0xa5, - 0x22, 0x4b, 0x67, 0xde, 0x2d, 0xf6, 0xe4, 0xfb, 0xfa, 0xa6, 0x98, 0x35, 0x18, 0xed, 0x98, 0x3d, - 0x78, 0xff, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xcd, 0x97, 0x1d, 0x59, 0x1c, 0x04, 0x00, 0x00, + // 729 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0xcf, 0x4f, 0x1b, 0x39, + 0x14, 0xce, 0x2c, 0x21, 0x10, 0x03, 0x0b, 0x18, 0x36, 0x9a, 0x25, 0xda, 0x6c, 0x36, 0x5c, 0x22, + 0x6d, 0x3b, 0xa3, 0xa4, 0x52, 0x55, 0xb5, 0x52, 0xa5, 0x02, 0x52, 0x9b, 0x43, 0x29, 0x9a, 0x40, + 0x0e, 0x5c, 0x46, 0x8e, 0xc7, 0x9a, 0xb1, 0x3a, 0x63, 0xa7, 0x63, 0xcf, 0x50, 0x0e, 0xbd, 0xf4, + 0x2f, 0xe8, 0x9f, 0xc5, 0xa1, 0x07, 0x8e, 0x3d, 0x55, 0x15, 0xfc, 0x23, 0xd5, 0xd8, 0xce, 0xaf, + 0x12, 0xa9, 0xb9, 0xd9, 0x7e, 0xef, 0xfb, 0xbe, 0xf7, 0xbe, 0x67, 0x1b, 0x74, 0x28, 0x93, 0x24, + 0xc5, 0x11, 0xa2, 0xcc, 0x17, 0x04, 0x67, 0x29, 0x95, 0xd7, 0x2e, 0xc6, 0xb9, 0x8b, 0x39, 0x13, + 0x59, 0x42, 0x52, 0x37, 0xef, 0xb8, 0x21, 0x61, 0x44, 0x50, 0xe1, 0x8c, 0x52, 0x2e, 0x39, 0x3c, + 0x5c, 0x00, 0x71, 0x30, 0xce, 0x9d, 0x31, 0xc4, 0xc9, 0x3b, 0x07, 0x2e, 0x1d, 0x62, 0x37, 0xa6, + 0x61, 0x24, 0x71, 0x4c, 0x09, 0x93, 0xc2, 0x95, 0x84, 0x05, 0x24, 0x4d, 0x28, 0x93, 0x05, 0xe5, + 0x74, 0xa7, 0x59, 0x0f, 0xfe, 0x2b, 0x00, 0x98, 0xa7, 0xc4, 0xc5, 0x11, 0x62, 0x8c, 0xc4, 0x45, + 0x96, 0x59, 0x9a, 0x94, 0xfd, 0x90, 0x87, 0x5c, 0x2d, 0xdd, 0x62, 0x65, 0x4e, 0xbb, 0xcb, 0x74, + 0x30, 0x29, 0x4d, 0x63, 0xea, 0x33, 0xc5, 0xa0, 0x21, 0xa6, 0xae, 0xbc, 0x1e, 0x11, 0xd3, 0x5f, + 0xeb, 0xeb, 0x1a, 0xd8, 0x7c, 0xad, 0x3b, 0xee, 0x4b, 0x24, 0x09, 0xec, 0x81, 0xca, 0x08, 0xa5, + 0x28, 0x11, 0xb6, 0xd5, 0xb4, 0xda, 0x1b, 0xdd, 0xff, 0x9d, 0x25, 0x1c, 0x70, 0xce, 0x14, 0xe4, + 0xa8, 0x7c, 0xf3, 0xfd, 0xdf, 0x92, 0x67, 0x08, 0xe0, 0x23, 0x00, 0x47, 0x29, 0xcf, 0x69, 0x40, + 0x52, 0x5f, 0x1b, 0xe3, 0xd3, 0xc0, 0xfe, 0xa3, 0x69, 0xb5, 0xab, 0xde, 0xce, 0x38, 0x72, 0xac, + 0x02, 0xbd, 0x00, 0x3a, 
0x60, 0x6f, 0x9a, 0xad, 0xad, 0x28, 0xd2, 0x57, 0x54, 0xfa, 0xee, 0x24, + 0x5d, 0x47, 0x7a, 0x01, 0xac, 0x83, 0x2a, 0x23, 0x57, 0xbe, 0x2a, 0xcc, 0x2e, 0x37, 0xad, 0xf6, + 0xba, 0xb7, 0xce, 0xc8, 0xd5, 0x71, 0xb1, 0x87, 0x3e, 0xf8, 0xeb, 0x57, 0x69, 0x51, 0xb4, 0x67, + 0xaf, 0x8e, 0x9b, 0x1a, 0x62, 0x67, 0x76, 0x62, 0xce, 0xcc, 0x8c, 0xf2, 0x8e, 0xa3, 0xab, 0x52, + 0x8e, 0x78, 0x7b, 0xf3, 0xa5, 0x6a, 0x9b, 0x22, 0x60, 0x4f, 0x05, 0x38, 0x13, 0x84, 0x89, 0x4c, + 0x18, 0x8d, 0x8a, 0xd2, 0x70, 0x7e, 0xab, 0x31, 0x86, 0x69, 0x99, 0xda, 0x44, 0x66, 0xee, 0x1c, + 0x86, 0x60, 0x27, 0x41, 0x32, 0x4b, 0x29, 0x0b, 0xfd, 0x11, 0xc2, 0xef, 0x89, 0x14, 0xf6, 0x5a, + 0x73, 0xa5, 0xbd, 0xd1, 0x7d, 0xba, 0xd4, 0x68, 0xde, 0x1a, 0xf0, 0xa0, 0x7f, 0x7c, 0xa6, 0xe0, + 0x66, 0x4a, 0xdb, 0x63, 0x56, 0x7d, 0x2a, 0xe0, 0x29, 0xd8, 0xa6, 0x8c, 0x4a, 0x8a, 0x62, 0x3f, + 0x47, 0xb1, 0x2f, 0x88, 0xb4, 0xd7, 0x95, 0x4e, 0x73, 0xb6, 0xf0, 0xe2, 0x06, 0x39, 0x03, 0x14, + 0xd3, 0x00, 0x49, 0x9e, 0x5e, 0x8c, 0x02, 0x24, 0x89, 0x61, 0xdc, 0x32, 0xf0, 0x01, 0x8a, 0xfb, + 0x44, 0xc2, 0x4f, 0xe0, 0x20, 0x22, 0x45, 0xfb, 0xbe, 0xe4, 0x05, 0xa3, 0x20, 0xd2, 0xcf, 0x54, + 0x7e, 0x31, 0xd7, 0xaa, 0xa2, 0x7e, 0xb1, 0x54, 0x0b, 0x6f, 0x14, 0xcd, 0x39, 0x1f, 0x28, 0x12, + 0xad, 0xd9, 0x3b, 0x31, 0xaa, 0xb5, 0x68, 0x51, 0x34, 0x80, 0x9f, 0x2d, 0xf0, 0x0f, 0xcf, 0xa4, + 0x90, 0x88, 0x05, 0x85, 0x77, 0x01, 0xbf, 0x62, 0x92, 0x26, 0xc4, 0x17, 0x31, 0x12, 0x11, 0x65, + 0xa1, 0x0d, 0x54, 0x09, 0xcf, 0x96, 0x2a, 0xe1, 0xdd, 0x94, 0xe9, 0xc4, 0x10, 0x19, 0xfd, 0x3a, + 0x7f, 0x18, 0xea, 0x1b, 0x09, 0xc8, 0x40, 0x6d, 0x44, 0xb4, 0xbe, 0x92, 0xf5, 0x53, 0xf2, 0x21, + 0x23, 0x42, 0x0a, 0x7b, 0x43, 0x5d, 0x92, 0xee, 0x52, 0xe2, 0x8a, 0xce, 0x33, 0x48, 0x23, 0xbb, + 0x6f, 0x78, 0xe7, 0x62, 0xad, 0x53, 0xb0, 0xfb, 0x60, 0xde, 0x70, 0x1f, 0xac, 0xe6, 0x02, 0xf7, + 0x02, 0xf5, 0xa2, 0xcb, 0x9e, 0xde, 0xc0, 0x43, 0xb0, 0xa5, 0x6f, 0x80, 0xbc, 0xf6, 0x8b, 0x9a, + 0xd5, 0xc3, 0x2c, 0x7b, 0x9b, 0xe3, 0xc3, 0x73, 0x9a, 0x90, 0xd6, 0x25, 0xa8, 0x2d, 0x36, 0x1f, + 0xd6, 0x40, 0x45, 0x1b, 0x6f, 0x58, 0xcd, 0x0e, 0xb6, 0xc1, 0xce, 0x83, 0x59, 0x6b, 0xe6, 0x3f, + 0xf3, 0xb9, 0x01, 0xb5, 0x2e, 0xc0, 0xde, 0x02, 0x57, 0xe1, 0x4b, 0x50, 0xcf, 0xc7, 0xd7, 0x6b, + 0xe6, 0x69, 0xa1, 0x20, 0x48, 0x89, 0xd0, 0xbf, 0x52, 0xd5, 0xfb, 0x7b, 0x92, 0x32, 0x79, 0x2d, + 0xaf, 0x74, 0xc2, 0xd1, 0xf9, 0xcd, 0x5d, 0xc3, 0xba, 0xbd, 0x6b, 0x58, 0x3f, 0xee, 0x1a, 0xd6, + 0x97, 0xfb, 0x46, 0xe9, 0xf6, 0xbe, 0x51, 0xfa, 0x76, 0xdf, 0x28, 0x5d, 0x3e, 0x0f, 0xa9, 0x8c, + 0xb2, 0xa1, 0x83, 0x79, 0xe2, 0x62, 0x2e, 0x12, 0x2e, 0xdc, 0xa9, 0xfb, 0x8f, 0x27, 0xdf, 0xe9, + 0xc7, 0xf9, 0x0f, 0x55, 0xfd, 0x96, 0xc3, 0x8a, 0xfa, 0x2e, 0x9f, 0xfc, 0x0c, 0x00, 0x00, 0xff, + 0xff, 0x09, 0x61, 0x41, 0x6b, 0x43, 0x06, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { @@ -249,6 +393,44 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + { + size, err := m.PendingSlashRequests.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + if len(m.OutstandingDowntimeSlashing) > 0 { + for iNdEx := len(m.OutstandingDowntimeSlashing) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OutstandingDowntimeSlashing[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.HeightToValsetUpdateId) > 0 { + for iNdEx := 
len(m.HeightToValsetUpdateId) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HeightToValsetUpdateId[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } if len(m.InitialValSet) > 0 { for iNdEx := len(m.InitialValSet) - 1; iNdEx >= 0; iNdEx-- { { @@ -371,6 +553,69 @@ func (m *MaturingVSCPacket) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HeightToValsetUpdateID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HeightToValsetUpdateID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HeightToValsetUpdateID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValsetUpdateId != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.ValsetUpdateId)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *OutstandingDowntime) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OutstandingDowntime) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OutstandingDowntime) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ValidatorConsensusAddress) > 0 { + i -= len(m.ValidatorConsensusAddress) + copy(dAtA[i:], m.ValidatorConsensusAddress) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.ValidatorConsensusAddress))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { offset -= sovGenesis(v) base := offset @@ -421,6 +666,20 @@ func (m *GenesisState) Size() (n int) { n += 1 + l + sovGenesis(uint64(l)) } } + if len(m.HeightToValsetUpdateId) > 0 { + for _, e := range m.HeightToValsetUpdateId { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.OutstandingDowntimeSlashing) > 0 { + for _, e := range m.OutstandingDowntimeSlashing { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.PendingSlashRequests.Size() + n += 1 + l + sovGenesis(uint64(l)) return n } @@ -439,6 +698,34 @@ func (m *MaturingVSCPacket) Size() (n int) { return n } +func (m *HeightToValsetUpdateID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovGenesis(uint64(m.Height)) + } + if m.ValsetUpdateId != 0 { + n += 1 + sovGenesis(uint64(m.ValsetUpdateId)) + } + return n +} + +func (m *OutstandingDowntime) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ValidatorConsensusAddress) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + return n +} + func sovGenesis(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -731,6 +1018,107 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HeightToValsetUpdateId", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HeightToValsetUpdateId = append(m.HeightToValsetUpdateId, HeightToValsetUpdateID{}) + if err := m.HeightToValsetUpdateId[len(m.HeightToValsetUpdateId)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OutstandingDowntimeSlashing", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OutstandingDowntimeSlashing = append(m.OutstandingDowntimeSlashing, OutstandingDowntime{}) + if err := m.OutstandingDowntimeSlashing[len(m.OutstandingDowntimeSlashing)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingSlashRequests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PendingSlashRequests.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenesis(dAtA[iNdEx:]) @@ -840,6 +1228,176 @@ func (m *MaturingVSCPacket) Unmarshal(dAtA []byte) error { } return nil } +func (m *HeightToValsetUpdateID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HeightToValsetUpdateID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HeightToValsetUpdateID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateId", wireType) + } + m.ValsetUpdateId = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OutstandingDowntime) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OutstandingDowntime: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OutstandingDowntime: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValidatorConsensusAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ValidatorConsensusAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenesis(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/ccv/consumer/types/genesis_test.go b/x/ccv/consumer/types/genesis_test.go index a122619961..3408c81ccb 100644 --- a/x/ccv/consumer/types/genesis_test.go +++ b/x/ccv/consumer/types/genesis_test.go @@ -55,29 +55,29 @@ func TestValidateInitialGenesisState(t *testing.T) { }{ { "valid new consumer genesis state", - types.NewInitialGenesisState(cs, consensusState, valUpdates, params), + types.NewInitialGenesisState(cs, consensusState, valUpdates, types.SlashRequests{}, params), false, }, { "invalid new consumer genesis state: nil client state", - types.NewInitialGenesisState(nil, consensusState, valUpdates, params), + types.NewInitialGenesisState(nil, consensusState, valUpdates, types.SlashRequests{}, params), true, }, { "invalid new consumer genesis state: invalid client state", types.NewInitialGenesisState(&ibctmtypes.ClientState{ChainId: "badClientState"}, - consensusState, valUpdates, params), + consensusState, valUpdates, types.SlashRequests{}, params), true, }, { "invalid new consumer genesis state: nil consensus state", - types.NewInitialGenesisState(cs, nil, valUpdates, params), + 
types.NewInitialGenesisState(cs, nil, valUpdates, types.SlashRequests{}, params), true, }, { "invalid new consumer genesis state: invalid consensus state", types.NewInitialGenesisState(cs, &ibctmtypes.ConsensusState{Timestamp: time.Now()}, - valUpdates, params), + valUpdates, types.SlashRequests{}, params), true, }, { @@ -91,6 +91,9 @@ func TestValidateInitialGenesisState(t *testing.T) { consensusState, nil, valUpdates, + nil, + nil, + types.SlashRequests{}, }, true, }, @@ -105,6 +108,9 @@ func TestValidateInitialGenesisState(t *testing.T) { consensusState, nil, valUpdates, + nil, + nil, + types.SlashRequests{}, }, true, }, @@ -119,12 +125,15 @@ func TestValidateInitialGenesisState(t *testing.T) { consensusState, []types.MaturingVSCPacket{{}}, valUpdates, + nil, + nil, + types.SlashRequests{}, }, true, }, { "invalid new consumer genesis state: nil initial validator set", - types.NewInitialGenesisState(cs, consensusState, nil, params), + types.NewInitialGenesisState(cs, consensusState, nil, types.SlashRequests{}, params), true, }, { @@ -132,7 +141,7 @@ func TestValidateInitialGenesisState(t *testing.T) { types.NewInitialGenesisState( cs, ibctmtypes.NewConsensusState( time.Now(), commitmenttypes.NewMerkleRoot([]byte("apphash")), []byte("wrong_hash")), - valUpdates, params), + valUpdates, types.SlashRequests{}, params), true, }, } @@ -173,7 +182,7 @@ func TestValidateRestartGenesisState(t *testing.T) { }{ { "valid restart consumer genesis state: empty maturing packets", - types.NewRestartGenesisState("ccvclient", "ccvchannel", nil, valUpdates, params), + types.NewRestartGenesisState("ccvclient", "ccvchannel", nil, valUpdates, nil, nil, params), false, }, { @@ -182,31 +191,31 @@ func TestValidateRestartGenesisState(t *testing.T) { {1, uint64(time.Now().UnixNano())}, {3, uint64(time.Now().UnixNano())}, {5, uint64(time.Now().UnixNano())}, - }, valUpdates, params), + }, valUpdates, nil, nil, params), false, }, { "invalid restart consumer genesis state: channel id is empty", - types.NewRestartGenesisState("", "ccvchannel", nil, valUpdates, params), + types.NewRestartGenesisState("", "ccvchannel", nil, valUpdates, nil, nil, params), true, }, { "invalid restart consumer genesis state: channel id is empty", - types.NewRestartGenesisState("ccvclient", "", nil, valUpdates, params), + types.NewRestartGenesisState("ccvclient", "", nil, valUpdates, nil, nil, params), true, }, { "invalid restart consumer genesis state: maturing packet vscId is invalid", types.NewRestartGenesisState("ccvclient", "ccvchannel", []types.MaturingVSCPacket{ {0, uint64(time.Now().UnixNano())}, - }, valUpdates, params), + }, valUpdates, nil, nil, params), true, }, { "invalid restart consumer genesis state: maturing packet time is invalid", types.NewRestartGenesisState("ccvclient", "ccvchannel", []types.MaturingVSCPacket{ {1, 0}, - }, valUpdates, params), + }, valUpdates, nil, nil, params), true, }, { @@ -220,6 +229,9 @@ func TestValidateRestartGenesisState(t *testing.T) { nil, nil, valUpdates, + nil, + nil, + types.SlashRequests{}, }, true, }, @@ -234,12 +246,15 @@ func TestValidateRestartGenesisState(t *testing.T) { consensusState, nil, valUpdates, + nil, + nil, + types.SlashRequests{}, }, true, }, { "invalid restart consumer genesis state: nil initial validator set", - types.NewRestartGenesisState("ccvclient", "ccvchannel", nil, nil, params), + types.NewRestartGenesisState("ccvclient", "ccvchannel", nil, nil, nil, nil, params), true, }, } diff --git a/x/ccv/consumer/types/keys.go b/x/ccv/consumer/types/keys.go index 
0a037f2e7f..a94006840a 100644 --- a/x/ccv/consumer/types/keys.go +++ b/x/ccv/consumer/types/keys.go @@ -4,7 +4,6 @@ import ( "encoding/binary" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/cosmos/cosmos-sdk/types/address" ) const ( @@ -122,8 +121,8 @@ func HeightValsetUpdateIDKey(height uint64) []byte { } // OutstandingDowntimeKey returns the key to a validators' outstanding downtime by consensus address -func OutstandingDowntimeKey(v sdk.ConsAddress) []byte { - return append([]byte{OutstandingDowntimeBytePrefix}, address.MustLengthPrefix(v.Bytes())...) +func OutstandingDowntimeKey(address sdk.ConsAddress) []byte { + return append([]byte{OutstandingDowntimeBytePrefix}, address.Bytes()...) } // CrossChainValidatorKey returns the key to a cross chain validator by consensus address diff --git a/x/ccv/democracy/distribution/doc.go b/x/ccv/democracy/distribution/doc.go new file mode 100644 index 0000000000..2905f74cd4 --- /dev/null +++ b/x/ccv/democracy/distribution/doc.go @@ -0,0 +1,9 @@ +/* +Package distribution defines a "wrapper" module around the Cosmos SDK's native +x/distribution module. In other words, it provides the exact same functionality as +the native module in that it simply embeds the native module. + +The consumer chain should utilize the x/ccv/democracy/distribution module to perform democratic +actions such as participating and voting within the chain's governance system. +*/ +package distribution diff --git a/x/ccv/democracy/distribution/module.go b/x/ccv/democracy/distribution/module.go new file mode 100644 index 0000000000..4c535aad44 --- /dev/null +++ b/x/ccv/democracy/distribution/module.go @@ -0,0 +1,125 @@ +package distribution + +import ( + "time" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/types/module" + + "github.com/cosmos/cosmos-sdk/telemetry" + sdk "github.com/cosmos/cosmos-sdk/types" + distr "github.com/cosmos/cosmos-sdk/x/distribution" + "github.com/cosmos/cosmos-sdk/x/distribution/keeper" + stakingkeeper "github.com/cosmos/cosmos-sdk/x/staking/keeper" + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + + distrtypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + abci "github.com/tendermint/tendermint/abci/types" +) + +var ( + _ module.AppModule = AppModule{} + _ module.AppModuleBasic = AppModuleBasic{} + _ module.AppModuleSimulation = AppModule{} +) + +// AppModule embeds the Cosmos SDK's x/distribution AppModuleBasic. +type AppModuleBasic struct { + distr.AppModuleBasic +} + +// AppModule embeds the Cosmos SDK's x/distribution AppModule +type AppModule struct { + // embed the Cosmos SDK's x/distribution AppModule + distr.AppModule + + keeper keeper.Keeper + accountKeeper distrtypes.AccountKeeper + bankKeeper distrtypes.BankKeeper + stakingKeeper stakingkeeper.Keeper + + feeCollectorName string +} + +// NewAppModule creates a new AppModule object using the native x/distribution module +// AppModule constructor. 
+func NewAppModule( + cdc codec.Codec, keeper keeper.Keeper, ak distrtypes.AccountKeeper, + bk distrtypes.BankKeeper, sk stakingkeeper.Keeper, feeCollectorName string, +) AppModule { + distrAppMod := distr.NewAppModule(cdc, keeper, ak, bk, sk) + return AppModule{ + AppModule: distrAppMod, + keeper: keeper, + accountKeeper: ak, + bankKeeper: bk, + stakingKeeper: sk, + feeCollectorName: feeCollectorName, + } +} + +// BeginBlocker mirror functionality of cosmos-sdk/distribution BeginBlocker +// however it allocates no proposer reward +func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { + defer telemetry.ModuleMeasureSince(distrtypes.ModuleName, time.Now(), telemetry.MetricKeyBeginBlocker) + + // TODO this is Tendermint-dependent + // ref https://github.com/cosmos/cosmos-sdk/issues/3095 + if ctx.BlockHeight() > 1 { + am.AllocateTokens(ctx) + } +} + +// AllocateTokens handles distribution of the collected fees +func (am AppModule) AllocateTokens( + ctx sdk.Context, +) { + + // fetch and clear the collected fees for distribution, since this is + // called in BeginBlock, collected fees will be from the previous block + // (and distributed to the current representatives) + feeCollector := am.accountKeeper.GetModuleAccount(ctx, consumertypes.ConsumerRedistributeName) + feesCollectedInt := am.bankKeeper.GetAllBalances(ctx, feeCollector.GetAddress()) + feesCollected := sdk.NewDecCoinsFromCoins(feesCollectedInt...) + + // transfer collected fees to the distribution module account + err := am.bankKeeper.SendCoinsFromModuleToModule(ctx, consumertypes.ConsumerRedistributeName, distrtypes.ModuleName, feesCollectedInt) + if err != nil { + panic(err) + } + + // temporary workaround to keep CanWithdrawInvariant happy + // general discussions here: https://github.com/cosmos/cosmos-sdk/issues/2906#issuecomment-441867634 + feePool := am.keeper.GetFeePool(ctx) + vs := am.stakingKeeper.GetValidatorSet() + totalBondedTokens := vs.TotalBondedTokens(ctx) + if totalBondedTokens.IsZero() { + feePool.CommunityPool = feePool.CommunityPool.Add(feesCollected...) + am.keeper.SetFeePool(ctx, feePool) + return + } + + // calculate the fraction allocated to representatives by subtracting the community tax. + // e.g. if community tax is 0.02, representatives fraction will be 0.98 (2% goes to the community pool and the rest to the representatives) + remaining := feesCollected + communityTax := am.keeper.GetCommunityTax(ctx) + representativesFraction := sdk.OneDec().Sub(communityTax) + + // allocate tokens proportionally to representatives voting power + vs.IterateBondedValidatorsByPower(ctx, func(_ int64, validator stakingtypes.ValidatorI) bool { + //we get this validator's percentage of the total power by dividing their tokens by the total bonded tokens + powerFraction := sdk.NewDecFromInt(validator.GetTokens()).QuoTruncate(sdk.NewDecFromInt(totalBondedTokens)) + //we truncate here again, which means that the reward will be slightly lower than it should be + reward := feesCollected.MulDecTruncate(representativesFraction).MulDecTruncate(powerFraction) + am.keeper.AllocateTokensToValidator(ctx, validator, reward) + remaining = remaining.Sub(reward) + + return false + }) + + // allocate community funding + //due to the 3 truncations above, remaining sent to the community pool will be slightly more than it should be. This is OK + feePool.CommunityPool = feePool.CommunityPool.Add(remaining...) 
+ am.keeper.SetFeePool(ctx, feePool) +} diff --git a/x/ccv/staking/doc.go b/x/ccv/democracy/staking/doc.go similarity index 85% rename from x/ccv/staking/doc.go rename to x/ccv/democracy/staking/doc.go index 71c779783a..a405c6fa2d 100644 --- a/x/ccv/staking/doc.go +++ b/x/ccv/democracy/staking/doc.go @@ -6,7 +6,7 @@ overrides two core methods, `InitGenesis` and `EndBlock`. Specifically, these methods perform no-ops and return no validator set updates, as validator sets are tracked by the consumer chain's x/ccv/consumer module. -The consumer chain should utilize the x/ccv/staking module to perform democratic +The consumer chain should utilize the x/ccv/democracy/staking module to perform democratic actions such as participating and voting within the chain's governance system. */ package staking diff --git a/x/ccv/staking/module.go b/x/ccv/democracy/staking/module.go similarity index 100% rename from x/ccv/staking/module.go rename to x/ccv/democracy/staking/module.go diff --git a/x/ccv/provider/client/proposal_handler.go b/x/ccv/provider/client/proposal_handler.go index f6199eaea3..345001c64a 100644 --- a/x/ccv/provider/client/proposal_handler.go +++ b/x/ccv/provider/client/proposal_handler.go @@ -21,21 +21,21 @@ import ( ) // ProposalHandler is the param change proposal handler. -var ProposalHandler = govclient.NewProposalHandler(NewCreateConsumerChainProposalTxCmd, ProposalRESTHandler) +var ProposalHandler = govclient.NewProposalHandler(SubmitConsumerAdditionPropTxCmd, ProposalRESTHandler) -// NewCreateConsumerChainProposalTxCmd returns a CLI command handler for creating -// a new consumer chain proposal governance transaction. -func NewCreateConsumerChainProposalTxCmd() *cobra.Command { +// SubmitConsumerAdditionPropTxCmd returns a CLI command handler for submitting +// a consumer addition proposal via a transaction. +func SubmitConsumerAdditionPropTxCmd() *cobra.Command { return &cobra.Command{ - Use: "create-consumer-chain [proposal-file]", + Use: "consumer-addition [proposal-file]", Args: cobra.ExactArgs(1), - Short: "Submit a consumer chain creation proposal", + Short: "Submit a consumer addition proposal", Long: ` -Submit a consumer chain creation proposal along with an initial deposit. +Submit a consumer addition proposal along with an initial deposit. The proposal details must be supplied via a JSON file. 
Example: -$ %s tx gov submit-proposal create-consumer-chain --from= +$ %s tx gov submit-proposal consumer-addition --from= Where proposal.json contains: @@ -59,12 +59,12 @@ Where proposal.json contains: return err } - proposal, err := ParseCreateConsumerChainProposalJSON(args[0]) + proposal, err := ParseConsumerAdditionProposalJSON(args[0]) if err != nil { return err } - content := types.NewCreateConsumerChainProposal( + content := types.NewConsumerAdditionProposal( proposal.Title, proposal.Description, proposal.ChainId, proposal.InitialHeight, proposal.GenesisHash, proposal.BinaryHash, proposal.SpawnTime) @@ -85,7 +85,7 @@ Where proposal.json contains: } } -type CreateConsumerChainProposalJSON struct { +type ConsumerAdditionProposalJSON struct { Title string `json:"title"` Description string `json:"description"` ChainId string `json:"chain_id"` @@ -96,7 +96,7 @@ type CreateConsumerChainProposalJSON struct { Deposit string `json:"deposit"` } -type CreateConsumerChainProposalReq struct { +type ConsumerAdditionProposalReq struct { BaseReq rest.BaseReq `json:"base_req"` Proposer sdk.AccAddress `json:"proposer"` @@ -110,8 +110,8 @@ type CreateConsumerChainProposalReq struct { Deposit sdk.Coins `json:"deposit"` } -func ParseCreateConsumerChainProposalJSON(proposalFile string) (CreateConsumerChainProposalJSON, error) { - proposal := CreateConsumerChainProposalJSON{} +func ParseConsumerAdditionProposalJSON(proposalFile string) (ConsumerAdditionProposalJSON, error) { + proposal := ConsumerAdditionProposalJSON{} contents, err := ioutil.ReadFile(filepath.Clean(proposalFile)) if err != nil { @@ -129,14 +129,14 @@ func ParseCreateConsumerChainProposalJSON(proposalFile string) (CreateConsumerCh // change REST handler with a given sub-route. func ProposalRESTHandler(clientCtx client.Context) govrest.ProposalRESTHandler { return govrest.ProposalRESTHandler{ - SubRoute: "create_consumer_chain", + SubRoute: "propose_consumer_addition", Handler: postProposalHandlerFn(clientCtx), } } func postProposalHandlerFn(clientCtx client.Context) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - var req CreateConsumerChainProposalReq + var req ConsumerAdditionProposalReq if !rest.ReadRESTReq(w, r, clientCtx.LegacyAmino, &req) { return } @@ -146,7 +146,7 @@ func postProposalHandlerFn(clientCtx client.Context) http.HandlerFunc { return } - content := types.NewCreateConsumerChainProposal( + content := types.NewConsumerAdditionProposal( req.Title, req.Description, req.ChainId, req.InitialHeight, req.GenesisHash, req.BinaryHash, req.SpawnTime) diff --git a/x/ccv/provider/ibc_module.go b/x/ccv/provider/ibc_module.go index e573478197..8792236d14 100644 --- a/x/ccv/provider/ibc_module.go +++ b/x/ccv/provider/ibc_module.go @@ -16,6 +16,9 @@ import ( ) // OnChanOpenInit implements the IBCModule interface +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-coinit1 +// Spec Tag: [CCV-PCF-COINIT.1] func (am AppModule) OnChanOpenInit( ctx sdk.Context, order channeltypes.Order, @@ -30,6 +33,10 @@ func (am AppModule) OnChanOpenInit( } // OnChanOpenTry implements the IBCModule interface +// +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-cotry1 +// Spec tag: [CCV-PCF-COTRY.1] func (am AppModule) OnChanOpenTry( ctx sdk.Context, order channeltypes.Order, @@ -110,6 +117,9 @@ func validateCCVChannelParams( } // OnChanOpenAck implements the IBCModule interface +// +// See: 
https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-coack1 +// Spec tag: [CCV-PCF-COACK.1] func (am AppModule) OnChanOpenAck( ctx sdk.Context, portID, @@ -121,6 +131,9 @@ func (am AppModule) OnChanOpenAck( } // OnChanOpenConfirm implements the IBCModule interface +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-coconfirm1 +// Spec tag: [CCV-PCF-COCONFIRM.1] func (am AppModule) OnChanOpenConfirm( ctx sdk.Context, portID, @@ -153,7 +166,7 @@ func (am AppModule) OnChanCloseConfirm( } // OnRecvPacket implements the IBCModule interface. A successful acknowledgement -// is returned if the packet data is succesfully decoded and the receive application +// is returned if the packet data is successfully decoded and the receive application // logic returns without error. func (am AppModule) OnRecvPacket( ctx sdk.Context, diff --git a/x/ccv/provider/ibc_module_test.go b/x/ccv/provider/ibc_module_test.go new file mode 100644 index 0000000000..c2ebbe636f --- /dev/null +++ b/x/ccv/provider/ibc_module_test.go @@ -0,0 +1,336 @@ +package provider_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + conntypes "github.com/cosmos/ibc-go/v3/modules/core/03-connection/types" + channeltypes "github.com/cosmos/ibc-go/v3/modules/core/04-channel/types" + host "github.com/cosmos/ibc-go/v3/modules/core/24-host" + ibctmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" + testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + "github.com/cosmos/interchain-security/x/ccv/provider" + providerkeeper "github.com/cosmos/interchain-security/x/ccv/provider/keeper" + providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" + ccv "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +// TestOnChanOpenInit tests the provider's OnChanOpenInit method against spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-coinit1 +// Spec Tag: [CCV-PCF-COINIT.1] +func TestOnChanOpenInit(t *testing.T) { + + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx( + t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + providerModule := provider.NewAppModule(&providerKeeper) + + // OnChanOpenInit must error for provider even with correct arguments + err := providerModule.OnChanOpenInit( + ctx, + channeltypes.ORDERED, + []string{"connection-1"}, + ccv.ProviderPortID, + "channel-1", + nil, + channeltypes.NewCounterparty(ccv.ConsumerPortID, "channel-1"), + ccv.Version, + ) + require.Error(t, err, "OnChanOpenInit must error on provider chain") +} + +// TestOnChanOpenTry validates the provider's OnChanOpenTry implementation against the spec. 
+// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-cotry1 +// Spec tag: [CCV-PCF-COTRY.1] +func TestOnChanOpenTry(t *testing.T) { + + // Params for the ChanOpenTry method + type params struct { + ctx sdk.Context + order channeltypes.Order + connectionHops []string + portID string + channelID string + chanCap *capabilitytypes.Capability + counterparty channeltypes.Counterparty + counterpartyVersion string + } + + testCases := []struct { + name string + mutateParams func(*params, *providerkeeper.Keeper) + expPass bool + }{ + { + "success", func(*params, *providerkeeper.Keeper) {}, true, + }, + { + "invalid order", func(params *params, keeper *providerkeeper.Keeper) { + params.order = channeltypes.UNORDERED + }, false, + }, + { + "invalid port ID", func(params *params, keeper *providerkeeper.Keeper) { + params.portID = "bad port" + }, false, + }, + { + "invalid counter party port ID", func(params *params, keeper *providerkeeper.Keeper) { + params.counterparty.PortId = "bad port" + }, false, + }, + { + "invalid counter party version", func(params *params, keeper *providerkeeper.Keeper) { + params.counterpartyVersion = "invalidVersion" + }, false, + }, + { + "unexpected client ID mapped to chain ID", func(params *params, keeper *providerkeeper.Keeper) { + keeper.SetConsumerClientId( + params.ctx, + "consumerChainID", + "invalidClientID", + ) + }, false, + }, + { + "other CCV channel exists for this consumer chain", + func(params *params, keeper *providerkeeper.Keeper) { + keeper.SetChainToChannel( + params.ctx, + "consumerChainID", + "some existing channel ID", + ) + }, false, + }, + } + + for _, tc := range testCases { + + // Setup + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx( + t, testkeeper.NewInMemKeeperParams(t)) + providerModule := provider.NewAppModule(&providerKeeper) + + providerKeeper.SetPort(ctx, ccv.ProviderPortID) + providerKeeper.SetConsumerClientId(ctx, "consumerChainID", "clientIDToConsumer") + + // Instantiate valid params as default. Individual test cases mutate these as needed. 
+ params := params{ + ctx: ctx, + order: channeltypes.ORDERED, + connectionHops: []string{"connectionIDToConsumer"}, + portID: ccv.ProviderPortID, + channelID: "providerChannelID", + chanCap: &capabilitytypes.Capability{}, + counterparty: channeltypes.NewCounterparty(ccv.ConsumerPortID, "consumerChannelID"), + counterpartyVersion: ccv.Version, + } + + // Expected mock calls + moduleAcct := authtypes.ModuleAccount{BaseAccount: &authtypes.BaseAccount{}} + moduleAcct.BaseAccount.Address = authtypes.NewModuleAddress(authtypes.FeeCollectorName).String() + + // Number of calls is not asserted, since not all code paths are hit for failures + gomock.InOrder( + mocks.MockScopedKeeper.EXPECT().ClaimCapability( + params.ctx, params.chanCap, host.ChannelCapabilityPath(params.portID, params.channelID)).AnyTimes(), + mocks.MockConnectionKeeper.EXPECT().GetConnection(ctx, "connectionIDToConsumer").Return( + conntypes.ConnectionEnd{ClientId: "clientIDToConsumer"}, true, + ).AnyTimes(), + mocks.MockClientKeeper.EXPECT().GetClientState(ctx, "clientIDToConsumer").Return( + &ibctmtypes.ClientState{ChainId: "consumerChainID"}, true, + ).AnyTimes(), + mocks.MockAccountKeeper.EXPECT().GetModuleAccount(ctx, "").Return(&moduleAcct).AnyTimes(), + ) + + tc.mutateParams(¶ms, &providerKeeper) + + metadata, err := providerModule.OnChanOpenTry( + params.ctx, + params.order, + params.connectionHops, + params.portID, + params.channelID, + params.chanCap, + params.counterparty, + params.counterpartyVersion, + ) + + if tc.expPass { + require.NoError(t, err) + md := &providertypes.HandshakeMetadata{} + err = md.Unmarshal([]byte(metadata)) + require.NoError(t, err) + require.Equal(t, moduleAcct.BaseAccount.Address, md.ProviderFeePoolAddr, + "returned dist account metadata must match expected") + require.Equal(t, ccv.Version, md.Version, "returned ccv version metadata must match expected") + ctrl.Finish() + } else { + require.Error(t, err) + } + } +} + +// TestOnChanOpenAck tests the provider's OnChanOpenAck method against spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-coack1 +// Spec tag: [CCV-PCF-COACK.1] +func TestOnChanOpenAck(t *testing.T) { + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx( + t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + providerModule := provider.NewAppModule(&providerKeeper) + + // OnChanOpenAck must error for provider even with correct arguments + err := providerModule.OnChanOpenAck( + ctx, + ccv.ProviderPortID, + "providerChannelID", + "consumerChannelID", + ccv.Version, + ) + require.Error(t, err, "OnChanOpenAck must error on provider chain") +} + +// TestOnChanOpenConfirm tests the provider's OnChanOpenConfirm method against the spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-coconfirm1 +// Spec tag: [CCV-PCF-COCONFIRM.1] +// +// TODO: Validate spec requirement that duplicate channels attempting to become canonical CCV channel are closed. 
+// See: https://github.com/cosmos/interchain-security/issues/327 +func TestOnChanOpenConfirm(t *testing.T) { + + testCases := []struct { + name string + mockExpectations func(sdk.Context, testkeeper.MockedKeepers) []*gomock.Call + setDuplicateChannel bool + expPass bool + }{ + { + name: "channel not found", + mockExpectations: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) []*gomock.Call { + return []*gomock.Call{ + mocks.MockChannelKeeper.EXPECT().GetChannel( + ctx, ccv.ProviderPortID, gomock.Any()).Return(channeltypes.Channel{}, + false, // Found is false + ).Times(1), + } + }, + expPass: false, + }, + { + name: "too many connection hops", + mockExpectations: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) []*gomock.Call { + return []*gomock.Call{ + mocks.MockChannelKeeper.EXPECT().GetChannel( + ctx, ccv.ProviderPortID, gomock.Any()).Return(channeltypes.Channel{ + State: channeltypes.OPEN, + ConnectionHops: []string{"connectionID", "another"}, // Two hops is two many + }, false, + ).Times(1), + } + }, + expPass: false, + }, + { + name: "connection not found", + mockExpectations: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) []*gomock.Call { + return []*gomock.Call{ + mocks.MockChannelKeeper.EXPECT().GetChannel( + ctx, ccv.ProviderPortID, gomock.Any()).Return(channeltypes.Channel{ + State: channeltypes.OPEN, + ConnectionHops: []string{"connectionID"}, + }, true, + ).Times(1), + mocks.MockConnectionKeeper.EXPECT().GetConnection(ctx, "connectionID").Return( + conntypes.ConnectionEnd{}, false, // Found is false + ).Times(1), + } + }, + expPass: false, + }, + { + name: "client state not found", + mockExpectations: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) []*gomock.Call { + return []*gomock.Call{ + mocks.MockChannelKeeper.EXPECT().GetChannel(ctx, ccv.ProviderPortID, gomock.Any()).Return( + channeltypes.Channel{ + State: channeltypes.OPEN, + ConnectionHops: []string{"connectionID"}, + }, + true, + ).Times(1), + mocks.MockConnectionKeeper.EXPECT().GetConnection(ctx, "connectionID").Return( + conntypes.ConnectionEnd{ClientId: "clientID"}, true, + ).Times(1), + mocks.MockClientKeeper.EXPECT().GetClientState(ctx, "clientID").Return( + nil, false, // Found is false + ).Times(1), + } + }, + expPass: false, + }, + { + name: "CCV channel already exists, error returned, but dup channel is not closed", + mockExpectations: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) []*gomock.Call { + // Error is returned after all expected mock calls are hit for SetConsumerChain + return testkeeper.GetMocksForSetConsumerChain(ctx, &mocks, "consumerChainID") + }, + setDuplicateChannel: true, // Only case where duplicate channel is setup + expPass: false, + }, + { + name: "success", + mockExpectations: func(ctx sdk.Context, mocks testkeeper.MockedKeepers) []*gomock.Call { + // Full SetConsumerChain method should run without error, hitting all expected mocks + return testkeeper.GetMocksForSetConsumerChain(ctx, &mocks, "consumerChainID") + }, + expPass: true, + }, + } + + for _, tc := range testCases { + + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx( + t, testkeeper.NewInMemKeeperParams(t)) + + gomock.InOrder(tc.mockExpectations(ctx, mocks)...) 
+ + if tc.setDuplicateChannel { + providerKeeper.SetChainToChannel(ctx, "consumerChainID", "existingChannelID") + } + + providerModule := provider.NewAppModule(&providerKeeper) + + err := providerModule.OnChanOpenConfirm(ctx, "providerPortID", "channelID") + + if tc.expPass { + + require.NoError(t, err) + // Validate channel mappings + channelID, found := providerKeeper.GetChainToChannel(ctx, "consumerChainID") + require.True(t, found) + require.Equal(t, "channelID", channelID) + + chainID, found := providerKeeper.GetChannelToChain(ctx, "channelID") + require.True(t, found) + require.Equal(t, "consumerChainID", chainID) + + height, found := providerKeeper.GetInitChainHeight(ctx, "consumerChainID") + require.True(t, found) + require.Equal(t, ctx.BlockHeight(), int64(height)) + + } else { + require.Error(t, err) + } + ctrl.Finish() + } +} diff --git a/x/ccv/provider/keeper/genesis.go b/x/ccv/provider/keeper/genesis.go index adba5471a8..3f91a0f02b 100644 --- a/x/ccv/provider/keeper/genesis.go +++ b/x/ccv/provider/keeper/genesis.go @@ -2,6 +2,7 @@ package keeper import ( "fmt" + "time" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/interchain-security/x/ccv/provider/types" @@ -14,7 +15,7 @@ func (k Keeper) InitGenesis(ctx sdk.Context, genState *types.GenesisState) { // Only try to bind to port if it is not already bound, since we may already own // port capability from capability InitGenesis if !k.IsBound(ctx, ccv.ProviderPortID) { - // transfer module binds to the transfer port on InitChain + // CCV module binds to the provider port on InitChain // and claims the returned capability err := k.BindPort(ctx, ccv.ProviderPortID) if err != nil { @@ -22,39 +23,146 @@ func (k Keeper) InitGenesis(ctx sdk.Context, genState *types.GenesisState) { } } + k.SetValidatorSetUpdateId(ctx, genState.ValsetUpdateId) + for _, v2h := range genState.ValsetUpdateIdToHeight { + k.SetValsetUpdateBlockHeight(ctx, v2h.ValsetUpdateId, v2h.Height) + } + + for _, cccp := range genState.ConsumerAdditionProposals { + // prevent implicit memory aliasing + cccp := cccp + if err := k.SetPendingConsumerAdditionProp(ctx, &cccp); err != nil { + panic(fmt.Errorf("pending create consumer chain proposal could not be persisted: %w", err)) + } + } + for _, sccp := range genState.ConsumerRemovalProposals { + k.SetPendingConsumerRemovalProp(ctx, sccp.ChainId, sccp.StopTime) + } + for _, ubdOp := range genState.UnbondingOps { + if err := k.SetUnbondingOp(ctx, ubdOp); err != nil { + panic(fmt.Errorf("unbonding op could not be persisted: %w", err)) + } + } + + if genState.MatureUnbondingOps != nil { + if err := k.AppendMaturedUnbondingOps(ctx, genState.MatureUnbondingOps.Ids); err != nil { + panic(err) + } + } + // Set initial state for each consumer chain - for _, cc := range genState.ConsumerStates { - k.SetChainToChannel(ctx, cc.ChainId, cc.ChannelId) - k.SetChannelToChain(ctx, cc.ChannelId, cc.ChainId) + for _, cs := range genState.ConsumerStates { + chainID := cs.ChainId + k.SetConsumerClientId(ctx, chainID, cs.ClientId) + if err := k.SetConsumerGenesis(ctx, chainID, cs.ConsumerGenesis); err != nil { + panic(fmt.Errorf("consumer chain genesis could not be persisted: %w", err)) + } + if cs.LockUnbondingOnTimeout { + k.SetLockUnbondingOnTimeout(ctx, chainID) + } + // check if the CCV channel was established + if cs.ChannelId != "" { + k.SetChannelToChain(ctx, cs.ChannelId, chainID) + k.SetChainToChannel(ctx, chainID, cs.ChannelId) + k.SetInitChainHeight(ctx, chainID, cs.InitialHeight) + + k.SetSlashAcks(ctx, cs.ChainId, 
cs.SlashDowntimeAck) + for _, ubdOpIndex := range cs.UnbondingOpsIndex { + k.SetUnbondingOpIndex(ctx, chainID, ubdOpIndex.ValsetUpdateId, ubdOpIndex.UnbondingOpIndex) + } + } else { + for _, vsc := range cs.PendingValsetChanges { + k.AppendPendingVSC(ctx, chainID, vsc) + } + } } k.SetParams(ctx, genState.Params) } func (k Keeper) ExportGenesis(ctx sdk.Context) *types.GenesisState { - store := ctx.KVStore(k.storeKey) - iterator := sdk.KVStorePrefixIterator(store, []byte{types.ChannelToChainBytePrefix}) - defer iterator.Close() - - if !iterator.Valid() { - return types.DefaultGenesisState() - } - var consumerStates []types.ConsumerState + // export states for each consumer chains + k.IterateConsumerChains(ctx, func(ctx sdk.Context, chainID, clientID string) bool { + gen, found := k.GetConsumerGenesis(ctx, chainID) + if !found { + panic(fmt.Errorf("cannot find genesis for consumer chain %s with client %s", chainID, clientID)) + } - for ; iterator.Valid(); iterator.Next() { - // channelID is extracted from bytes in key following the single byte prefix - channelID := string(iterator.Key()[1:]) - chainID := string(iterator.Value()) + // initial consumer chain states + cs := types.ConsumerState{ + ChainId: chainID, + ClientId: clientID, + ConsumerGenesis: gen, + LockUnbondingOnTimeout: k.GetLockUnbondingOnTimeout(ctx, chainID), + } - cc := types.ConsumerState{ - ChainId: chainID, - ChannelId: channelID, + // try to find channel id for the current consumer chain + channelId, found := k.GetChainToChannel(ctx, chainID) + if found { + cs.ChannelId = channelId + cs.InitialHeight, found = k.GetInitChainHeight(ctx, chainID) + if !found { + panic(fmt.Errorf("cannot find genesis for consumer chain %s with client %s", chainID, clientID)) + } + cs.SlashDowntimeAck = k.GetSlashAcks(ctx, chainID) + k.IterateOverUnbondingOpIndex(ctx, chainID, func(vscID uint64, ubdIndex []uint64) bool { + cs.UnbondingOpsIndex = append(cs.UnbondingOpsIndex, + types.UnbondingOpIndex{ValsetUpdateId: vscID, UnbondingOpIndex: ubdIndex}, + ) + return true + }) + } else { + if pendingVSC, found := k.GetPendingVSCs(ctx, chainID); found { + cs.PendingValsetChanges = pendingVSC + } } - consumerStates = append(consumerStates, cc) + + consumerStates = append(consumerStates, cs) + return true + }) + + // export provider chain states + vscID := k.GetValidatorSetUpdateId(ctx) + vscIDToHeights := []types.ValsetUpdateIdToHeight{} + k.IterateValsetUpdateBlockHeight(ctx, func(vscID, height uint64) bool { + vscIDToHeights = append(vscIDToHeights, types.ValsetUpdateIdToHeight{ValsetUpdateId: vscID, Height: height}) + return true + }) + + ubdOps := []ccv.UnbondingOp{} + k.IterateOverUnbondingOps(ctx, func(id uint64, ubdOp ccv.UnbondingOp) bool { + ubdOps = append(ubdOps, ubdOp) + return true + }) + + matureUbdOps, err := k.GetMaturedUnbondingOps(ctx) + if err != nil { + panic(err) } + addProps := []types.ConsumerAdditionProposal{} + k.IteratePendingConsumerAdditionProps(ctx, func(_ time.Time, prop types.ConsumerAdditionProposal) bool { + addProps = append(addProps, prop) + return true + }) + + remProps := []types.ConsumerRemovalProposal{} + k.IteratePendingConsumerRemovalProps(ctx, func(_ time.Time, prop types.ConsumerRemovalProposal) bool { + remProps = append(remProps, prop) + return true + }) + params := k.GetParams(ctx) - return types.NewGenesisState(consumerStates, params) + return types.NewGenesisState( + vscID, + vscIDToHeights, + consumerStates, + ubdOps, + &ccv.MaturedUnbondingOps{Ids: matureUbdOps}, + addProps, + remProps, + params, + ) } 
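The reworked ExportGenesis above walks consumer chains via IterateConsumerChains, whose callback now receives both the chain ID and the client ID and must return true to keep iterating (returning false stops the walk early). A minimal sketch of a caller under those semantics; the collectConsumers function and registeredConsumer type are illustrative only and not part of this change, while the import paths and the IterateConsumerChains signature are taken from this diff:

package example

import (
	sdk "github.com/cosmos/cosmos-sdk/types"
	"github.com/cosmos/interchain-security/x/ccv/provider/keeper"
)

// registeredConsumer pairs a consumer chain ID with the client ID the provider
// keeps for it (illustrative type only).
type registeredConsumer struct {
	ChainID  string
	ClientID string
}

// collectConsumers gathers every consumer chain known to the provider keeper.
func collectConsumers(ctx sdk.Context, k keeper.Keeper) []registeredConsumer {
	consumers := []registeredConsumer{}
	k.IterateConsumerChains(ctx, func(ctx sdk.Context, chainID, clientID string) bool {
		consumers = append(consumers, registeredConsumer{ChainID: chainID, ClientID: clientID})
		return true // keep iterating; returning false would stop after this chain
	})
	return consumers
}

ExportGenesis itself returns true from its callback for the same reason: it needs to visit every registered consumer chain before assembling the provider genesis state.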
diff --git a/x/ccv/provider/keeper/genesis_test.go b/x/ccv/provider/keeper/genesis_test.go new file mode 100644 index 0000000000..2ef06517d3 --- /dev/null +++ b/x/ccv/provider/keeper/genesis_test.go @@ -0,0 +1,153 @@ +package keeper_test + +import ( + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + host "github.com/cosmos/ibc-go/v3/modules/core/24-host" + testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + + consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + "github.com/cosmos/interchain-security/x/ccv/provider/keeper" + "github.com/cosmos/interchain-security/x/ccv/provider/types" + providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" + ccv "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" +) + +func TestIniAndExportGenesis(t *testing.T) { + // create a provider chain genesis populated with two consumer chains + cChainIDs := []string{"c0", "c1"} + expClientID := "client" + oneHourFromNow := time.Now().UTC().Add(time.Hour) + initHeight, vscID := uint64(5), uint64(1) + ubdIndex := []uint64{0, 1, 2} + params := providertypes.DefaultParams() + + // create genesis struct + pGenesis := providertypes.NewGenesisState(vscID, + []providertypes.ValsetUpdateIdToHeight{{ValsetUpdateId: vscID, Height: initHeight}}, + []providertypes.ConsumerState{ + providertypes.NewConsumerStates( + cChainIDs[0], + expClientID, + "channel", + initHeight, + true, + *consumertypes.DefaultGenesisState(), + []providertypes.UnbondingOpIndex{ + {ValsetUpdateId: vscID, UnbondingOpIndex: ubdIndex}, + }, + nil, + []string{"slashedValidatorConsAddress"}, + ), + providertypes.NewConsumerStates( + cChainIDs[1], + expClientID, + "", + 0, + false, + *consumertypes.DefaultGenesisState(), + nil, + []ccv.ValidatorSetChangePacketData{{ValsetUpdateId: vscID}}, + nil, + ), + }, + []ccv.UnbondingOp{{ + Id: vscID, + UnbondingConsumerChains: []string{cChainIDs[0]}, + }}, + &ccv.MaturedUnbondingOps{Ids: ubdIndex}, + []providertypes.ConsumerAdditionProposal{types.ConsumerAdditionProposal{ + ChainId: cChainIDs[0], + SpawnTime: oneHourFromNow, + }}, + []providertypes.ConsumerRemovalProposal{types.ConsumerRemovalProposal{ + ChainId: cChainIDs[0], + StopTime: oneHourFromNow, + }}, + params, + ) + + // Instantiate in-mem provider keeper with mocks + pk, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + + gomock.InOrder( + mocks.MockScopedKeeper.EXPECT().GetCapability( + ctx, host.PortPath(ccv.ProviderPortID), + ).Return(nil, true).Times(1), + ) + + // init provider chain + pk.InitGenesis(ctx, pGenesis) + + // check local provider chain states + ubdOps, found := pk.GetUnbondingOp(ctx, vscID) + require.True(t, found) + require.Equal(t, pGenesis.UnbondingOps[0], ubdOps) + matureUbdOps, err := pk.GetMaturedUnbondingOps(ctx) + require.NoError(t, err) + require.Equal(t, ubdIndex, matureUbdOps) + chainID, found := pk.GetChannelToChain(ctx, pGenesis.ConsumerStates[0].ChannelId) + require.True(t, found) + require.Equal(t, cChainIDs[0], chainID) + require.Equal(t, vscID, pk.GetValidatorSetUpdateId(ctx)) + height, found := pk.GetValsetUpdateBlockHeight(ctx, vscID) + require.True(t, found) + require.Equal(t, initHeight, height) + addProp, found := pk.GetPendingConsumerAdditionProp(ctx, oneHourFromNow, cChainIDs[0]) + require.True(t, found) + require.Equal(t, pGenesis.ConsumerAdditionProposals[0], addProp) + require.True(t, 
pk.GetPendingConsumerRemovalProp(ctx, cChainIDs[0], oneHourFromNow)) + require.Equal(t, pGenesis.Params, pk.GetParams(ctx)) + + // check provider chain's consumer chain states + assertConsumerChainStates(ctx, t, pk, pGenesis.ConsumerStates...) + + // check the exported genesis + require.Equal(t, pGenesis, pk.ExportGenesis(ctx)) + +} + +func assertConsumerChainStates(ctx sdk.Context, t *testing.T, pk keeper.Keeper, consumerStates ...providertypes.ConsumerState) { + for _, cs := range consumerStates { + chainID := cs.ChainId + gen, found := pk.GetConsumerGenesis(ctx, chainID) + require.True(t, found) + require.Equal(t, *consumertypes.DefaultGenesisState(), gen) + + clientID, found := pk.GetConsumerClientId(ctx, chainID) + require.True(t, found) + require.Equal(t, cs.ClientId, clientID) + + if expChan := cs.ChannelId; expChan != "" { + gotChan, found := pk.GetChainToChannel(ctx, chainID) + require.True(t, found) + require.Equal(t, expChan, gotChan) + } + + if cs.InitialHeight != 0 { + _, found = pk.GetInitChainHeight(ctx, chainID) + require.True(t, found) + } + + require.Equal(t, cs.LockUnbondingOnTimeout, pk.GetLockUnbondingOnTimeout(ctx, chainID)) + + if expVSC := cs.GetPendingValsetChanges(); expVSC != nil { + gotVSC, found := pk.GetPendingVSCs(ctx, chainID) + require.True(t, found) + require.Equal(t, expVSC, gotVSC) + } + + for _, ubdOpIdx := range cs.UnbondingOpsIndex { + ubdIndex, found := pk.GetUnbondingOpIndex(ctx, chainID, ubdOpIdx.ValsetUpdateId) + require.True(t, found) + require.Equal(t, ubdOpIdx.UnbondingOpIndex, ubdIndex) + } + + require.Equal(t, cs.SlashDowntimeAck, pk.GetSlashAcks(ctx, chainID)) + } +} diff --git a/x/ccv/provider/keeper/keeper.go b/x/ccv/provider/keeper/keeper.go index 52980b321e..217ea55c37 100644 --- a/x/ccv/provider/keeper/keeper.go +++ b/x/ccv/provider/keeper/keeper.go @@ -77,26 +77,26 @@ func (k Keeper) Logger(ctx sdk.Context) log.Logger { return ctx.Logger().With("module", "x/"+host.ModuleName+"-"+types.ModuleName) } -// IsBound checks if the transfer module is already bound to the desired port +// IsBound checks if the CCV module is already bound to the desired port func (k Keeper) IsBound(ctx sdk.Context, portID string) bool { _, ok := k.scopedKeeper.GetCapability(ctx, host.PortPath(portID)) return ok } -// BindPort defines a wrapper function for the ort Keeper's function in +// BindPort defines a wrapper function for the port Keeper's function in // order to expose it to module's InitGenesis function func (k Keeper) BindPort(ctx sdk.Context, portID string) error { cap := k.portKeeper.BindPort(ctx, portID) return k.ClaimCapability(ctx, cap, host.PortPath(portID)) } -// GetPort returns the portID for the transfer module. Used in ExportGenesis +// GetPort returns the portID for the CCV module. Used in ExportGenesis func (k Keeper) GetPort(ctx sdk.Context) string { store := ctx.KVStore(k.storeKey) return string(store.Get(types.PortKey())) } -// SetPort sets the portID for the transfer module. Used in InitGenesis +// SetPort sets the portID for the CCV module. Used in InitGenesis func (k Keeper) SetPort(ctx sdk.Context, portID string) { store := ctx.KVStore(k.storeKey) store.Set(types.PortKey(), []byte(portID)) @@ -135,10 +135,10 @@ func (k Keeper) DeleteChainToChannel(ctx sdk.Context, chainID string) { store.Delete(types.ChainToChannelKey(chainID)) } -// IterateConsumerChains iterates over all of the consumer chains that the provider module controls. 
-// It calls the provided callback function which takes in a chainID and returns +// IterateConsumerChains iterates over all of the consumer chains that the provider module controls +// It calls the provided callback function which takes in a chainID and client ID to return // a stop boolean which will stop the iteration. -func (k Keeper) IterateConsumerChains(ctx sdk.Context, cb func(ctx sdk.Context, chainID string) (stop bool)) { +func (k Keeper) IterateConsumerChains(ctx sdk.Context, cb func(ctx sdk.Context, chainID, clientID string) (stop bool)) { store := ctx.KVStore(k.storeKey) iterator := sdk.KVStorePrefixIterator(store, []byte{types.ChainToClientBytePrefix}) defer iterator.Close() @@ -150,9 +150,9 @@ func (k Keeper) IterateConsumerChains(ctx sdk.Context, cb func(ctx sdk.Context, for ; iterator.Valid(); iterator.Next() { // remove 1 byte prefix from key to retrieve chainID chainID := string(iterator.Key()[1:]) + clientID := string(iterator.Value()) - stop := cb(ctx, chainID) - if stop { + if !cb(ctx, chainID, clientID) { return } } @@ -228,6 +228,11 @@ func (k Keeper) GetConsumerGenesis(ctx sdk.Context, chainID string) (consumertyp return data, true } +func (k Keeper) DeleteConsumerGenesis(ctx sdk.Context, chainID string) { + store := ctx.KVStore(k.storeKey) + store.Delete(types.ConsumerGenesisKey(chainID)) +} + // VerifyConsumerChain verifies that the chain trying to connect on the channel handshake // is the expected consumer chain. func (k Keeper) VerifyConsumerChain(ctx sdk.Context, channelID string, connectionHops []string) error { @@ -266,14 +271,14 @@ func (k Keeper) SetConsumerChain(ctx sdk.Context, channelID string) error { return sdkerrors.Wrap(channeltypes.ErrTooManyConnectionHops, "must have direct connection to consumer chain") } connectionID := channel.ConnectionHops[0] - chainID, tmClient, err := k.getUnderlyingClient(ctx, connectionID) + _, tmClient, err := k.getUnderlyingClient(ctx, connectionID) if err != nil { return err } // Verify that there isn't already a CCV channel for the consumer chain - // If there is, then close the channel. 
- if prevChannel, ok := k.GetChannelToChain(ctx, chainID); ok { - return sdkerrors.Wrapf(ccv.ErrDuplicateChannel, "CCV channel with ID: %s already created for consumer chain %s", prevChannel, chainID) + chainID := tmClient.ChainId + if prevChannelID, ok := k.GetChainToChannel(ctx, chainID); ok { + return sdkerrors.Wrapf(ccv.ErrDuplicateChannel, "CCV channel with ID: %s already created for consumer chain %s", prevChannelID, chainID) } // the CCV channel is established: @@ -312,13 +317,35 @@ func (k Keeper) DeleteUnbondingOp(ctx sdk.Context, id uint64) { store.Delete(types.UnbondingOpKey(id)) } +func (k Keeper) IterateOverUnbondingOps(ctx sdk.Context, cb func(id uint64, ubdOp ccv.UnbondingOp) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.UnbondingOpBytePrefix}) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + id := binary.BigEndian.Uint64(iterator.Key()[1:]) + bz := iterator.Value() + if bz == nil { + panic(fmt.Errorf("unbonding operation is nil for id %d", id)) + } + ubdOp := types.MustUnmarshalUnbondingOp(k.cdc, bz) + + if !cb(id, ubdOp) { + break + } + } +} + // This index allows retreiving UnbondingDelegationEntries by chainID and valsetUpdateID func (k Keeper) SetUnbondingOpIndex(ctx sdk.Context, chainID string, valsetUpdateID uint64, IDs []uint64) { store := ctx.KVStore(k.storeKey) - bz, err := json.Marshal(IDs) + index := ccv.UnbondingOpsIndex{ + Ids: IDs, + } + bz, err := index.Marshal() if err != nil { - panic("Failed to JSON marshal") + panic("Failed to marshal UnbondingOpsIndex") } store.Set(types.UnbondingOpIndexKey(chainID, valsetUpdateID), bz) @@ -340,13 +367,12 @@ func (k Keeper) IterateOverUnbondingOpIndex(ctx sdk.Context, chainID string, cb } vscID = binary.BigEndian.Uint64(vscBytes) - var ids []uint64 - err = json.Unmarshal(iterator.Value(), &ids) - if err != nil { + var index ccv.UnbondingOpsIndex + if err = index.Unmarshal(iterator.Value()); err != nil { panic("Failed to unmarshal JSON") } - if !cb(vscID, ids) { + if !cb(vscID, index.GetIds()) { return } } @@ -361,13 +387,12 @@ func (k Keeper) GetUnbondingOpIndex(ctx sdk.Context, chainID string, valsetUpdat return []uint64{}, false } - var ids []uint64 - err := json.Unmarshal(bz, &ids) - if err != nil { - panic("Failed to JSON unmarshal") + var idx ccv.UnbondingOpsIndex + if err := idx.Unmarshal(bz); err != nil { + panic("Failed to unmarshal UnbondingOpsIndex") } - return ids, true + return idx.GetIds(), true } // This index allows retreiving UnbondingDelegationEntries by chainID and valsetUpdateID @@ -401,11 +426,12 @@ func (k Keeper) GetMaturedUnbondingOps(ctx sdk.Context) (ids []uint64, err error if bz == nil { return nil, nil } - err = json.Unmarshal(bz, &ids) - if err != nil { + + var ops ccv.MaturedUnbondingOps + if err := ops.Unmarshal(bz); err != nil { return nil, err } - return ids, nil + return ops.GetIds(), nil } // AppendMaturedUnbondingOps adds a list of ids to the list of matured unbonding operation ids @@ -417,11 +443,13 @@ func (k Keeper) AppendMaturedUnbondingOps(ctx sdk.Context, ids []uint64) error { if err != nil { return err } - // append works also on a nil list - existingIds = append(existingIds, ids...) 
+ + maturedOps := ccv.MaturedUnbondingOps{ + Ids: append(existingIds, ids...), + } store := ctx.KVStore(k.storeKey) - bz, err := json.Marshal(existingIds) + bz, err := maturedOps.Marshal() if err != nil { return err } @@ -440,21 +468,27 @@ func (k Keeper) EmptyMaturedUnbondingOps(ctx sdk.Context) ([]uint64, error) { return ids, nil } -func (k Keeper) getUnderlyingClient(ctx sdk.Context, connectionID string) (string, *ibctmtypes.ClientState, error) { - // Retrieve the underlying client state. +// Retrieves the underlying client state corresponding to a connection ID. +func (k Keeper) getUnderlyingClient(ctx sdk.Context, connectionID string) ( + clientID string, tmClient *ibctmtypes.ClientState, err error) { + conn, ok := k.connectionKeeper.GetConnection(ctx, connectionID) if !ok { - return "", nil, sdkerrors.Wrapf(conntypes.ErrConnectionNotFound, "connection not found for connection ID: %s", connectionID) + return "", nil, sdkerrors.Wrapf(conntypes.ErrConnectionNotFound, + "connection not found for connection ID: %s", connectionID) } - client, ok := k.clientKeeper.GetClientState(ctx, conn.ClientId) + clientID = conn.ClientId + clientState, ok := k.clientKeeper.GetClientState(ctx, clientID) if !ok { - return "", nil, sdkerrors.Wrapf(clienttypes.ErrClientNotFound, "client not found for client ID: %s", conn.ClientId) + return "", nil, sdkerrors.Wrapf(clienttypes.ErrClientNotFound, + "client not found for client ID: %s", conn.ClientId) } - tmClient, ok := client.(*ibctmtypes.ClientState) + tmClient, ok = clientState.(*ibctmtypes.ClientState) if !ok { - return "", nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, "invalid client type. expected %s, got %s", ibcexported.Tendermint, client.ClientType()) + return "", nil, sdkerrors.Wrapf(clienttypes.ErrInvalidClientType, + "invalid client type. 
expected %s, got %s", ibcexported.Tendermint, clientState.ClientType()) } - return conn.ClientId, tmClient, nil + return clientID, tmClient, nil } // chanCloseInit defines a wrapper function for the channel Keeper's function @@ -521,7 +555,7 @@ func (k *Keeper) Hooks() StakingHooks { func (h StakingHooks) AfterUnbondingInitiated(ctx sdk.Context, ID uint64) { var consumerChainIDS []string - h.k.IterateConsumerChains(ctx, func(ctx sdk.Context, chainID string) (stop bool) { + h.k.IterateConsumerChains(ctx, func(ctx sdk.Context, chainID, clientID string) (stop bool) { consumerChainIDS = append(consumerChainIDS, chainID) return false }) @@ -571,6 +605,23 @@ func (k Keeper) GetValsetUpdateBlockHeight(ctx sdk.Context, valsetUpdateId uint6 return binary.BigEndian.Uint64(bz), true } +// IterateValsetUpdateBlockHeight iterates through the valset update IDs to block heights set in the store +func (k Keeper) IterateValsetUpdateBlockHeight(ctx sdk.Context, cb func(valsetUpdateId, height uint64) bool) { + store := ctx.KVStore(k.storeKey) + iterator := sdk.KVStorePrefixIterator(store, []byte{types.ValsetUpdateBlockHeightBytePrefix}) + + defer iterator.Close() + for ; iterator.Valid(); iterator.Next() { + + valsetUpdateId := binary.BigEndian.Uint64(iterator.Key()[1:]) + height := binary.BigEndian.Uint64(iterator.Value()) + + if !cb(valsetUpdateId, height) { + return + } + } +} + // DeleteValsetUpdateBlockHeight deletes the block height value for a given vaset update id func (k Keeper) DeleteValsetUpdateBlockHeight(ctx sdk.Context, valsetUpdateId uint64) { store := ctx.KVStore(k.storeKey) @@ -580,12 +631,15 @@ func (k Keeper) DeleteValsetUpdateBlockHeight(ctx sdk.Context, valsetUpdateId ui // SetSlashAcks sets the slash acks under the given chain ID func (k Keeper) SetSlashAcks(ctx sdk.Context, chainID string, acks []string) { store := ctx.KVStore(k.storeKey) - buf := &bytes.Buffer{} - err := json.NewEncoder(buf).Encode(acks) + + sa := types.SlashAcks{ + Addresses: acks, + } + bz, err := sa.Marshal() if err != nil { - panic("failed to encode json") + panic("failed to marshal SlashAcks") } - store.Set(types.SlashAcksKey(chainID), buf.Bytes()) + store.Set(types.SlashAcksKey(chainID), bz) } // GetSlashAcks returns the slash acks stored under the given chain ID @@ -595,15 +649,12 @@ func (k Keeper) GetSlashAcks(ctx sdk.Context, chainID string) []string { if bz == nil { return nil } - var acks []string - buf := bytes.NewBuffer(bz) - - err := json.NewDecoder(buf).Decode(&acks) - if err != nil { + var acks types.SlashAcks + if err := acks.Unmarshal(bz); err != nil { panic(fmt.Errorf("failed to decode json: %w", err)) } - return acks + return acks.GetAddresses() } // EmptySlashAcks empties and returns the slash acks for a given chain ID @@ -627,15 +678,13 @@ func (k Keeper) IterateSlashAcks(ctx sdk.Context, cb func(chainID string, acks [ chainID := string(iterator.Key()[1:]) - var data []string - buf := bytes.NewBuffer(iterator.Value()) - - err := json.NewDecoder(buf).Decode(&data) + var sa types.SlashAcks + err := sa.Unmarshal(iterator.Value()) if err != nil { - panic(fmt.Errorf("failed to decode json: %w", err)) + panic(fmt.Errorf("failed to unmarshal SlashAcks: %w", err)) } - if !cb(chainID, data) { + if !cb(chainID, sa.GetAddresses()) { return } } @@ -706,7 +755,11 @@ func (k Keeper) AppendPendingVSC(ctx sdk.Context, chainID string, packet ccv.Val packets, _ := k.GetPendingVSCs(ctx, chainID) // append works also on a nil list packets = append(packets, packet) + k.SetPendingVSCs(ctx, chainID, packets) +} +// SetPendingVSCs writes a list of VSCs to
store associated to chainID +func (k Keeper) SetPendingVSCs(ctx sdk.Context, chainID string, packets []ccv.ValidatorSetChangePacketData) { store := ctx.KVStore(k.storeKey) var data [][]byte for _, p := range packets { @@ -726,13 +779,12 @@ func (k Keeper) AppendPendingVSC(ctx sdk.Context, chainID string, packet ccv.Val // EmptyPendingVSC empties and returns the list of pending ValidatorSetChange packets for chain ID (if it exists) func (k Keeper) EmptyPendingVSC(ctx sdk.Context, chainID string) (packets []ccv.ValidatorSetChangePacketData) { - packets, found := k.GetPendingVSCs(ctx, chainID) - if !found { - // there is no list of pending ValidatorSetChange packets - return nil + existing, found := k.GetPendingVSCs(ctx, chainID) + if found { + packets = existing + store := ctx.KVStore(k.storeKey) + store.Delete(types.PendingVSCsKey(chainID)) } - store := ctx.KVStore(k.storeKey) - store.Delete(types.PendingVSCsKey(chainID)) return packets } diff --git a/x/ccv/provider/keeper/keeper_test.go b/x/ccv/provider/keeper/keeper_test.go index 6bd62e2b8d..c201f53e33 100644 --- a/x/ccv/provider/keeper/keeper_test.go +++ b/x/ccv/provider/keeper/keeper_test.go @@ -3,7 +3,6 @@ package keeper_test import ( "testing" - capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" "github.com/golang/mock/gomock" @@ -24,7 +23,8 @@ import ( // TestValsetUpdateBlockHeight tests the getter, setter, and deletion methods for valset updates mapped to block height func TestValsetUpdateBlockHeight(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() blockHeight, found := providerKeeper.GetValsetUpdateBlockHeight(ctx, uint64(0)) require.False(t, found) @@ -49,7 +49,8 @@ func TestValsetUpdateBlockHeight(t *testing.T) { // TestSlashAcks tests the getter, setter, iteration, and deletion methods for stored slash acknowledgements func TestSlashAcks(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() var chainsAcks [][]string @@ -91,7 +92,8 @@ func TestSlashAcks(t *testing.T) { // TestAppendSlashAck tests the append method for stored slash acknowledgements func TestAppendSlashAck(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() p := []string{"alice", "bob", "charlie"} chains := []string{"c1", "c2"} @@ -110,7 +112,8 @@ func TestAppendSlashAck(t *testing.T) { // TestPendingVSCs tests the getter, appending, and deletion methods for stored pending VSCs func TestPendingVSCs(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() chainID := "consumer" @@ -164,7 +167,8 @@ func TestPendingVSCs(t *testing.T) { // TestInitHeight tests the getter and setter methods for the stored block heights (on provider) when a given consumer chain was started func TestInitHeight(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + 
defer ctrl.Finish() tc := []struct { chainID string @@ -186,13 +190,12 @@ func TestInitHeight(t *testing.T) { // TestHandleSlashPacketDoubleSigning tests the handling of a double-signing related slash packet, with mocks and unit tests func TestHandleSlashPacketDoubleSigning(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() chainId := "consumer" infractionHeight := int64(5) - cdc, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) + keeperParams := testkeeper.NewInMemKeeperParams(t) + ctx := keeperParams.Ctx slashPacket := ccv.NewSlashPacketData( abci.Validator{Address: ed25519.GenPrivKey().PubKey().Address(), @@ -201,8 +204,11 @@ func TestHandleSlashPacketDoubleSigning(t *testing.T) { stakingtypes.DoubleSign, ) - mockStakingKeeper := testkeeper.NewMockStakingKeeper(ctrl) - mockSlashingKeeper := testkeeper.NewMockSlashingKeeper(ctrl) + ctrl := gomock.NewController(t) + defer ctrl.Finish() + mocks := testkeeper.NewMockedKeepers(ctrl) + mockSlashingKeeper := mocks.MockSlashingKeeper + mockStakingKeeper := mocks.MockStakingKeeper // Setup expected mock calls gomock.InOrder( @@ -235,19 +241,7 @@ func TestHandleSlashPacketDoubleSigning(t *testing.T) { evidencetypes.DoubleSignJailEndTime).Times(1), ) - providerKeeper := testkeeper.GetProviderKeeperWithMocks( - cdc, - storeKey, - paramsSubspace, - capabilitykeeper.ScopedKeeper{}, - testkeeper.NewMockChannelKeeper(ctrl), - testkeeper.NewMockPortKeeper(ctrl), - testkeeper.NewMockConnectionKeeper(ctrl), - testkeeper.NewMockClientKeeper(ctrl), - mockStakingKeeper, - mockSlashingKeeper, - testkeeper.NewMockAccountKeeper(ctrl), - ) + providerKeeper := testkeeper.NewInMemProviderKeeper(keeperParams, mocks) providerKeeper.SetInitChainHeight(ctx, chainId, uint64(infractionHeight)) @@ -257,7 +251,10 @@ func TestHandleSlashPacketDoubleSigning(t *testing.T) { } func TestIterateOverUnbondingOpIndex(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + chainID := "6" // mock an unbonding index @@ -280,7 +277,9 @@ func TestIterateOverUnbondingOpIndex(t *testing.T) { } func TestMaturedUnbondingOps(t *testing.T) { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() ids, err := providerKeeper.GetMaturedUnbondingOps(ctx) require.NoError(t, err) diff --git a/x/ccv/provider/keeper/keymap.go b/x/ccv/provider/keeper/keymap.go new file mode 100644 index 0000000000..b97f9ab1a3 --- /dev/null +++ b/x/ccv/provider/keeper/keymap.go @@ -0,0 +1,37 @@ +package keeper + +func (k Keeper) getPkToCk() map[PK]CK { + _ = k + // TODO: implement + panic("not implemented") +} + +func (k Keeper) getCkToPk() map[CK]PK { + _ = k + // TODO: implement + panic("not implemented") +} + +func (k Keeper) getCkToMemo() map[CK]memo { + _ = k + // TODO: implement + panic("not implemented") +} + +func (k Keeper) setPkToCk(e map[PK]CK) { + _ = k + // TODO: implement + panic("not implemented") +} + +func (k Keeper) setCkToPk(e map[CK]PK) { + _ = k + // TODO: implement + panic("not implemented") +} + +func (k Keeper) setCkToMemo(e map[CK]memo) { + _ = k + // TODO: implement + panic("not implemented") +} diff --git a/x/ccv/provider/keeper/keymap_bytes.py b/x/ccv/provider/keeper/keymap_bytes.py new file mode 100644 index 0000000000..cf5a4703f2 --- /dev/null +++ 
b/x/ccv/provider/keeper/keymap_bytes.py @@ -0,0 +1,20 @@ +num_chains = 20 +num_validators = 175 +key_bytes = 64 + + +def compute_usage(): + + # must store 4 keys per validator + fixed = num_chains * num_validators * 4 * key_bytes + + # in a very pessimistic case, we must store 10 + # old keys, for each validator + pessimism_factor = 10 + return fixed * pessimism_factor + + +bytes = compute_usage() +kibibytes = bytes / 1024 +mibibytes = kibibytes / 1024 +print(mibibytes) diff --git a/x/ccv/provider/keeper/keymap_core.go b/x/ccv/provider/keeper/keymap_core.go new file mode 100644 index 0000000000..a6e3089c53 --- /dev/null +++ b/x/ccv/provider/keeper/keymap_core.go @@ -0,0 +1,261 @@ +package keeper + +import ( + "errors" +) + +type PK = int +type CK = int +type VSCID = int + +type update struct { + key int + power int +} + +type memo struct { + ck CK + pk PK + vscid int + power int +} + +// TODO: +// 1. Integrate into kv store. +// 2. integrate into Provider::EndBlock, +// 3. integrate with create/destroy validator +// 4. TODO: document this file + +type KeyMap struct { + store Store + pkToCk map[PK]CK + ckToPk map[CK]PK + ckToMemo map[CK]memo +} + +type Store interface { + getPkToCk() map[PK]CK + getCkToPk() map[CK]PK + getCkToMemo() map[CK]memo + setPkToCk(map[PK]CK) + setCkToPk(map[CK]PK) + setCkToMemo(map[CK]memo) +} + +func MakeKeyMap(store Store) KeyMap { + return KeyMap{ + store: store, + } +} + +// GetAll reads all data from store +// The granularity of store access can be changed if needed for +// performance reasons. +func (e *KeyMap) GetAll() { + e.pkToCk = e.store.getPkToCk() + e.ckToPk = e.store.getCkToPk() + e.ckToMemo = e.store.getCkToMemo() +} + +// SetAll writes all data to store +// The granularity of store access can be changed if needed for +// performance reasons. +func (e *KeyMap) SetAll() { + e.store.setPkToCk(e.pkToCk) + e.store.setCkToPk(e.ckToPk) + e.store.setCkToMemo(e.ckToMemo) +} + +// TODO: +func (e *KeyMap) SetProviderKeyToConsumerKey(pk PK, ck CK) error { + e.GetAll() + if _, ok := e.ckToPk[ck]; ok { + return errors.New(`cannot reuse key which is in use or was recently in use`) + } + if _, ok := e.ckToMemo[ck]; ok { + return errors.New(`cannot reuse key which is in use or was recently in use`) + } + if oldCk, ok := e.pkToCk[pk]; ok { + delete(e.ckToPk, oldCk) + } + e.pkToCk[pk] = ck + e.ckToPk[ck] = pk + e.SetAll() // TODO: Try with defer + return nil +} + +// TODO: do regular query (CK for PK) + +// TODO: +func (e *KeyMap) GetProviderKey(ck CK) (PK, error) { + e.GetAll() + if u, ok := e.ckToMemo[ck]; ok { + return u.pk, nil + } else if pk, ok := e.ckToPk[ck]; ok { + return pk, nil + } else { + return -1, errors.New("provider key not found for consumer key") + } +} + +// TODO: +func (e *KeyMap) PruneUnusedKeys(latestVscid VSCID) { + e.GetAll() + toDel := []CK{} + for _, u := range e.ckToMemo { + // If the last update was a deletion (0 power) and the update + // matured then pruning is possible.
+ if u.power == 0 && u.vscid <= latestVscid { + toDel = append(toDel, u.ck) + } + } + for _, ck := range toDel { + delete(e.ckToMemo, ck) + } + e.SetAll() +} + +// TODO: +func (e *KeyMap) ComputeUpdates(vscid VSCID, providerUpdates []update) (consumerUpdates []update) { + + e.GetAll() + + updates := map[PK]int{} + + for _, u := range providerUpdates { + updates[u.key] = u.power + } + + updates = e.inner(vscid, updates) + + consumerUpdates = []update{} + + for ck, power := range updates { + consumerUpdates = append(consumerUpdates, update{key: ck, power: power}) + } + + e.SetAll() + return consumerUpdates +} + +// do inner work as part of ComputeUpdates +func (e *KeyMap) inner(vscid VSCID, providerUpdates map[PK]int) map[CK]int { + + pks := []PK{} + + // Grab provider keys where the assigned consumer key has changed + for oldCk, u := range e.ckToMemo { + if newCk, ok := e.pkToCk[u.pk]; ok { + if oldCk != newCk && 0 < u.power { + pks = append(pks, u.pk) + } + } + } + // Grab provider keys where the validator power has changed + for pk := range providerUpdates { + pks = append(pks, pk) + } + + ret := map[CK]int{} + + // Create a read only copy, so that we can query while writing + // updates to the old version. + ckToMemo_READ_ONLY := map[CK]memo{} + for ck, memo := range e.ckToMemo { + ckToMemo_READ_ONLY[ck] = memo + } + + for _, pk := range pks { + for _, u := range ckToMemo_READ_ONLY { + if u.pk == pk && 0 < u.power { + // For each provider key for which there was already a positive update + // create a deletion update for the associated consumer key. + e.ckToMemo[u.ck] = memo{ck: u.ck, pk: pk, vscid: vscid, power: 0} + ret[u.ck] = 0 + } + } + } + + for _, pk := range pks { + // For each provider key where there was either + // 1) already a positive power update + // 2) the validator power has changed (and is still positive) + // create a change update for the associated consumer key. + + power := 0 + for _, u := range ckToMemo_READ_ONLY { + if u.pk == pk && 0 < u.power { + // There was previously a positive power update: copy it. + power = u.power + } + } + // There is a new validator power: use it. + if newPower, ok := providerUpdates[pk]; ok { + power = newPower + } + // Only ship update with positive powers. Zero power updates (deletions) + // are handled in earlier block. + if 0 < power { + ck := e.pkToCk[pk] + e.ckToMemo[ck] = memo{ck: ck, pk: pk, vscid: vscid, power: power} + ret[ck] = power + } + } + + return ret +} + +// Returns true iff internal invariants hold +func (e *KeyMap) internalInvariants() bool { + + e.GetAll() + + // No two provider keys can map to the same consumer key + // (pkToCk is sane) + seen := map[CK]bool{} + for _, ck := range e.pkToCk { + if seen[ck] { + return false + } + seen[ck] = true + } + + // all values of pkToCk is a key of ckToPk + // (reverse lookup is always possible) + for _, ck := range e.pkToCk { + if _, ok := e.ckToPk[ck]; !ok { + return false + } + } + + // All consumer keys mapping to provider keys are actually + // mapped to by the provider key. + // (ckToPk is sane) + for ck := range e.ckToPk { + good := false + for _, candidateCk := range e.pkToCk { + if candidateCk == ck { + good = true + break + } + } + if !good { + return false + } + } + + // If a consumer key is mapped to a provider key (currently) + // any memo containing the same consumer key has the same + // mapping. 
+ // (Ensures lookups are correct) + for ck, pk := range e.ckToPk { + if u, ok := e.ckToMemo[ck]; ok { + if pk != u.pk { + return false + } + } + } + + return true + +} diff --git a/x/ccv/provider/keeper/keymap_core_test.go b/x/ccv/provider/keeper/keymap_core_test.go new file mode 100644 index 0000000000..2c978c63ae --- /dev/null +++ b/x/ccv/provider/keeper/keymap_core_test.go @@ -0,0 +1,524 @@ +package keeper + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +// Num traces to run for heuristic testing +const NUM_TRACES = 1000 + +// Len of trace for a single heuristic testing run +const TRACE_LEN = 1001 + +// Number of validators to simulate +const NUM_VALS = 4 + +// Number of consumer keys in the universe +// (This is constrained to ensure overlap edge cases are tested) +const NUM_CKS = 50 + +type store struct { + pkToCk map[PK]CK + ckToPk map[CK]PK + ckToMemo map[CK]memo +} + +func makeStore() store { + return store{ + pkToCk: map[PK]CK{}, + ckToPk: map[CK]PK{}, + ckToMemo: map[CK]memo{}, + } +} +func (s *store) getPkToCk() map[PK]CK { + return s.pkToCk +} +func (s *store) getCkToPk() map[CK]PK { + return s.ckToPk +} +func (s *store) getCkToMemo() map[CK]memo { + return s.ckToMemo +} +func (s *store) setPkToCk(e map[PK]CK) { + s.pkToCk = e +} +func (s *store) setCkToPk(e map[CK]PK) { + s.ckToPk = e +} +func (s *store) setCkToMemo(e map[CK]memo) { + s.ckToMemo = e +} + +type keyMapEntry struct { + pk PK + ck CK +} + +type traceStep struct { + keyMapEntries []keyMapEntry + providerUpdates []update + timeProvider int + timeConsumer int + timeMaturity int +} + +type driver struct { + t *testing.T + km *KeyMap + trace []traceStep + lastTimeProvider int + lastTimeConsumer int + lastTimeMaturity int + // indexed by time (starting at 0) + mappings []map[PK]CK + // indexed by time (starting at 0) + consumerUpdates [][]update + // indexed by time (starting at 0) + providerValsets []valset + // The validator set from the perspective of + // the consumer chain. + consumerValsets valset +} + +func makeDriver(t *testing.T, trace []traceStep) driver { + d := driver{} + d.t = t + s := makeStore() + require.NotNil(t, s.ckToMemo) + kd := MakeKeyMap(&s) + d.km = &kd + d.trace = trace + d.lastTimeProvider = 0 + d.lastTimeConsumer = 0 + d.lastTimeMaturity = 0 + d.mappings = []map[PK]CK{} + d.consumerUpdates = [][]update{} + d.providerValsets = []valset{} + d.consumerValsets = valset{} + return d +} + +// Utility struct to make simulating a validator set easier. +type valset struct { + keyToPower map[int]int +} + +func makeValset() valset { + return valset{keyToPower: map[int]int{}} +} + +// Apply a batch of (key, power) updates to the known validator set. +func (vs *valset) applyUpdates(updates []update) { + for _, u := range updates { + delete(vs.keyToPower, u.key) + if 0 < u.power { + vs.keyToPower[u.key] = u.power + } + } +} + +// Apply a list of (pk, ck) mapping requests to the KeyDel class instance +func (d *driver) applyKeyMapEntries(entries []keyMapEntry) { + for _, e := range entries { + // TRY to map provider key pk to consumer key ck. + // (May fail due to API constraints, this is correct) + _ = d.km.SetProviderKeyToConsumerKey(e.pk, e.ck) + } + // Duplicate the mapping for referencing later in tests. 
+ copy := map[PK]CK{} + for lk, fk := range d.km.pkToCk { + copy[lk] = fk + } + d.mappings = append(d.mappings, copy) +} + +// Apply a list of provider validator power updates +func (d *driver) applyProviderUpdates(providerUPdates []update) { + // Duplicate the previous valSet so that it can be referenced + // later in tests. + valSet := makeValset() + for pk, power := range d.providerValsets[d.lastTimeProvider].keyToPower { + valSet.keyToPower[pk] = power + } + valSet.applyUpdates(providerUPdates) + d.providerValsets = append(d.providerValsets, valSet) +} + +// Run a trace +// This includes bootstrapping the data structure with the first (init) +// step of the trace, and running a sequence of steps afterwards. +// Internal and external invariants (properties) of the data structure +// are tested after each step. +func (d *driver) run() { + + // Initialise + { + init := d.trace[0] + // Set the initial map + d.applyKeyMapEntries(init.keyMapEntries) + // Set the initial provider set + d.providerValsets = append(d.providerValsets, makeValset()) + d.providerValsets[init.timeProvider].applyUpdates(init.providerUpdates) + // Set the initial consumer set + d.consumerUpdates = append(d.consumerUpdates, d.km.ComputeUpdates(init.timeProvider, init.providerUpdates)) + // The first consumer set equal to the provider set at time 0 + d.consumerValsets = makeValset() + d.consumerValsets.applyUpdates(d.consumerUpdates[init.timeConsumer]) + d.km.PruneUnusedKeys(init.timeMaturity) + } + + // Sanity check the initial state + require.Len(d.t, d.mappings, 1) + require.Len(d.t, d.consumerUpdates, 1) + require.Len(d.t, d.providerValsets, 1) + + // Check properties for each step after the initial one + for _, s := range d.trace[1:] { + if d.lastTimeProvider < s.timeProvider { + // Provider time increase: + // Apply some new key mapping requests to KeyDel, and create new validator + // power updates. + d.applyKeyMapEntries(s.keyMapEntries) + d.applyProviderUpdates(s.providerUpdates) + // Store the updates, to reference later in tests. + d.consumerUpdates = append(d.consumerUpdates, d.km.ComputeUpdates(s.timeProvider, s.providerUpdates)) + d.lastTimeProvider = s.timeProvider + } + if d.lastTimeConsumer < s.timeConsumer { + // Consumer time increase: + // For each unit of time that has passed since the last increase, apply + // any updates which have been 'emitted' by a provider time increase step. + for j := d.lastTimeConsumer + 1; j <= s.timeConsumer; j++ { + d.consumerValsets.applyUpdates(d.consumerUpdates[j]) + } + d.lastTimeConsumer = s.timeConsumer + } + if d.lastTimeMaturity < s.timeMaturity { + // Maturity time increase: + // For each unit of time that has passed since the last increase, + // a maturity is 'available'. We test batch maturity. + d.km.PruneUnusedKeys(s.timeMaturity) + d.lastTimeMaturity = s.timeMaturity + } + + // Do checks + require.True(d.t, d.km.internalInvariants()) + d.externalInvariants() + } +} + +// Check invariants which are 'external' to the data structure being used. +// That is: these invariants make sense in the context of the wider system, +// and aren't specifically about the KeyDel data structure internal state. +// +// There are three invariants +// +// 1. Validator Set Replication +// 'All consumer validator sets are some earlier provider validator set' +// +// 2. Queries +// 'It is always possible to query the provider key for a given consumer +// key, when the consumer can still make slash requests' +// +// 3. 
Pruning +// 'When the pruning method is used correctly, the internal state of the +// data structure does not grow unboundedly' +// +// Please see body for details. +func (d *driver) externalInvariants() { + + /* + For a consumer who has received updates up to vscid i, its + provider validator set must be equal to the set on the provider + when i was sent, mapped through the mapping at that time. + */ + validatorSetReplication := func() { + + // Get the consumer set. + cSet := d.consumerValsets.keyToPower + // Get the provider set - at the corresponding time. + pSet := d.providerValsets[d.lastTimeConsumer].keyToPower + + // Compute a reverse lookup allowing comparison + // of the two sets. + cSetLikePSet := map[PK]int{} + { + mapping := d.mappings[d.lastTimeConsumer] + inverseMapping := map[CK]PK{} + for pk, ck := range mapping { + inverseMapping[ck] = pk + } + for ck, power := range cSet { + cSetLikePSet[inverseMapping[ck]] = power + } + } + + // Check that the two validator sets match exactly. + for pk, expectedPower := range pSet { + actualPower := cSetLikePSet[pk] + require.Equal(d.t, expectedPower, actualPower) + } + for pk, actualPower := range cSetLikePSet { + expectedPower := pSet[pk] + require.Equal(d.t, expectedPower, actualPower) + } + } + + /* + For any key that the consumer is aware of, because it has + received that key at some time in the past, and has not yet + returned the maturity vscid for its removal: + the key is useable as a query parameter to lookup the key + of the validator which should be slashed for misbehavior. + */ + queries := func() { + // For each key known to the consumer + for ck := range d.consumerValsets.keyToPower { + + // The query must return a result + pkQueried, err := d.km.GetProviderKey(ck) + require.Nil(d.t, err) + + // The provider key must be the one that was actually referenced + // in the latest trueMapping used to compute updates sent to the + // consumer. + cks_TRUE := map[CK]bool{} + trueMapping := d.mappings[d.lastTimeConsumer] + for pk_TRUE, ck_TRUE := range trueMapping { + + // Sanity check: no two provider keys should map to the same consumer key + require.Falsef(d.t, cks_TRUE[ck_TRUE], "two provider keys map to the same consumer key") + + // Record that this consumer key was indeed mapped to by some provider key + // at time lastTimeConsumer + cks_TRUE[ck_TRUE] = true + + // If the consumer key is the one used as a query param + if ck == ck_TRUE { + // Then the provider key returned by the query must be exactly + // the same one as was actually mapped to. + require.Equal(d.t, pk_TRUE, pkQueried) + } + } + // Check that the comparison was actually made, and that the test + // actually did something. + require.Truef(d.t, cks_TRUE[ck], "no mapping found for consumer key") + } + } + + /* + All keys that the consumer definitely cannot use as a parameter in + a slash request must eventually be pruned from state. + A consumer can still reference a key if the last update it received + for the key had a positive power associated to it, OR the last update + had a 0 power associated (deletion) but the maturity period for that + update has not yet elapsed (and the maturity was not yet received + on the provider chain). + */ + pruning := func() { + + // Do we expect to be able to query the provider key for a given consumer + // key? 
+ expectQueryable := map[CK]bool{} + + for i := 0; i <= d.lastTimeMaturity; i++ { + for _, u := range d.consumerUpdates[i] { + // If the latest update for a given consumer key was dispatched + // AND also matured since the last maturity, then + // 1) if that update was a positive power update then no subsequent + // zero power update can have matured. Thus the key should be + // queryable. + // 2) if that update was a zero power update then the + // key should not be queryable unless it was used in a subsequent + // update (see next block). + expectQueryable[u.key] = 0 < u.power + } + } + for i := d.lastTimeMaturity + 1; i <= d.lastTimeProvider; i++ { + for _, u := range d.consumerUpdates[i] { + // If a positive OR zero power update was RECENTLY received + // for the consumer, then the key must be queryable. + expectQueryable[u.key] = true + } + } + // If a consumer key is CURRENTLY mapped to by a provider key, it + // must be queryable. + for _, ck := range d.km.pkToCk { + expectQueryable[ck] = true + } + + // Simply check every consumer key for the correct queryable-ness. + for ck := 0; ck < NUM_CKS; ck++ { + _, err := d.km.GetProviderKey(ck) + actualQueryable := err == nil + if expect, found := expectQueryable[ck]; found && expect { + require.True(d.t, actualQueryable) + } else { + require.False(d.t, actualQueryable) + } + } + } + + validatorSetReplication() + queries() + pruning() + +} + +// Return a randomly generated list of steps +// which can be used to execute actions for testing. +func getTrace(t *testing.T) []traceStep { + + keyMappings := func() []keyMapEntry { + ret := []keyMapEntry{} + + const NUM_ITS = 2 // Chosen arbitrarily/heuristically + // Do this NUM_ITS times, to be able to generate conflicting mappings. + // This is allowed by the KeyDel API, so it must be tested. + for i := 0; i < NUM_ITS; i++ { + // include none (to) all validators + pks := rand.Perm(NUM_VALS)[0:rand.Intn(NUM_VALS+1)] + for _, pk := range pks { + ck := rand.Intn(NUM_CKS) + ret = append(ret, keyMapEntry{pk, ck}) + } + } + return ret + } + + providerUpdates := func() []update { + ret := []update{} + + // include none (to) all validators + pks := rand.Perm(NUM_VALS)[0:rand.Intn(NUM_VALS+1)] + for _, pk := range pks { + // Only three values are interesting + // 0: deletion + // 1: positive + // 2: positive (change) + power := rand.Intn(3) + ret = append(ret, update{key: pk, power: power}) + } + return ret + } + + // Get an initial key mapping. + // The real system may use some manually set defaults. + initialMappings := []keyMapEntry{} + for i := 0; i < NUM_VALS; i++ { + initialMappings = append(initialMappings, keyMapEntry{i, i}) + } + + ret := []traceStep{ + { + // Hard code initial mapping + keyMapEntries: initialMappings, + providerUpdates: providerUpdates(), + timeProvider: 0, + timeConsumer: 0, + timeMaturity: 0, + }, + } + + for i := 0; i < TRACE_LEN; i++ { + choice := rand.Intn(3) + last := ret[len(ret)-1] + if choice == 0 { + // Increment provider time, and generate + // new key mappings and validator updates. + ret = append(ret, traceStep{ + keyMapEntries: keyMappings(), + providerUpdates: providerUpdates(), + timeProvider: last.timeProvider + 1, + timeConsumer: last.timeConsumer, + timeMaturity: last.timeMaturity, + }) + } + if choice == 1 { + // If possible, increase consumer time. + // This models receiving VSC packets on the consumer.
+ curr := last.timeConsumer + limInclusive := last.timeProvider + if curr < limInclusive { + // add in [1, limInclusive - curr] + // rand in [0, limInclusive - curr - 1] + // bound is [0, limInclusive - curr) + newTC := rand.Intn(limInclusive-curr) + curr + 1 + require.True(t, curr < newTC && newTC <= limInclusive) + ret = append(ret, traceStep{ + keyMapEntries: nil, + providerUpdates: nil, + timeProvider: last.timeProvider, + timeConsumer: newTC, + timeMaturity: last.timeMaturity, + }) + } + } + if choice == 2 { + // If possible, increase maturity time. + // This models sending maturities on the consumer (and also + // receiving them on the provider). + curr := last.timeMaturity + limInclusive := last.timeConsumer + if curr < limInclusive { + newTM := rand.Intn(limInclusive-curr) + curr + 1 + require.True(t, curr < newTM && newTM <= limInclusive) + ret = append(ret, traceStep{ + keyMapEntries: nil, + providerUpdates: nil, + timeProvider: last.timeProvider, + timeConsumer: last.timeConsumer, + timeMaturity: newTM, + }) + } + } + } + return ret +} + +// Execute randomly generated traces (lists of actions) +// against new instances of the class, checking properties +// after each action is done. +func TestPropertiesRandomlyHeuristically(t *testing.T) { + for i := 0; i < NUM_TRACES; i++ { + trace := []traceStep{} + for len(trace) < 2 { + trace = getTrace(t) + } + d := makeDriver(t, trace) + d.run() + } +} + +// Setting should enable a reverse query +func TestXSetReverseQuery(t *testing.T) { + s := makeStore() + kd := MakeKeyMap(&s) + kd.SetProviderKeyToConsumerKey(42, 43) + actual, err := kd.GetProviderKey(43) // Queryable + require.Nil(t, err) + require.Equal(t, 42, actual) +} + +// Not setting should not enable a reverse query +func TestNoSetReverseQuery(t *testing.T) { + s := makeStore() + kd := MakeKeyMap(&s) + _, err := kd.GetProviderKey(43) // Not queryable + require.NotNil(t, err) +} + +// Setting and replacing should not allow earlier reverse query +func TestXSetUnsetReverseQuery(t *testing.T) { + s := makeStore() + kd := MakeKeyMap(&s) + kd.SetProviderKeyToConsumerKey(42, 43) + kd.SetProviderKeyToConsumerKey(42, 44) // Set to different value + _, err := kd.GetProviderKey(43) // Earlier value not queryable + require.NotNil(t, err) +} + +// TODO: add more of these.. diff --git a/x/ccv/provider/keeper/params_test.go b/x/ccv/provider/keeper/params_test.go index b67c856398..6a8298ad6f 100644 --- a/x/ccv/provider/keeper/params_test.go +++ b/x/ccv/provider/keeper/params_test.go @@ -4,8 +4,6 @@ import ( "testing" "time" - capabilitykeeper "github.com/cosmos/cosmos-sdk/x/capability/keeper" - paramstypes "github.com/cosmos/cosmos-sdk/x/params/types" clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" commitmenttypes "github.com/cosmos/ibc-go/v3/modules/core/23-commitment/types" ibctmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" @@ -14,33 +12,15 @@ import ( "github.com/stretchr/testify/require" ) +// TestParams tests the default params of the keeper, and getting/setting new params.
func TestParams(t *testing.T) { defaultParams := types.DefaultParams() - // Constuct our own params subspace - cdc, storeKey, paramsSubspace, ctx := testkeeper.SetupInMemKeeper(t) - keyTable := paramstypes.NewKeyTable(paramstypes.NewParamSetPair(types.KeyTemplateClient, &ibctmtypes.ClientState{}, func(value interface{}) error { return nil })) - paramsSubspace = paramsSubspace.WithKeyTable(keyTable) - - expectedClientState := - ibctmtypes.NewClientState("", ibctmtypes.DefaultTrustLevel, 0, 0, - time.Second*10, clienttypes.Height{}, commitmenttypes.GetSDKSpecs(), []string{"upgrade", "upgradedIBCState"}, true, true) - - paramsSubspace.Set(ctx, types.KeyTemplateClient, expectedClientState) - - providerKeeper := testkeeper.GetProviderKeeperWithMocks( - cdc, - storeKey, - paramsSubspace, - capabilitykeeper.ScopedKeeper{}, - &testkeeper.MockChannelKeeper{}, - &testkeeper.MockPortKeeper{}, - &testkeeper.MockConnectionKeeper{}, - &testkeeper.MockClientKeeper{}, - &testkeeper.MockStakingKeeper{}, - &testkeeper.MockSlashingKeeper{}, - &testkeeper.MockAccountKeeper{}, - ) + // Construct an in-mem keeper with a populated template client state + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() params := providerKeeper.GetParams(ctx) require.Equal(t, defaultParams, params) diff --git a/x/ccv/provider/keeper/proposal.go b/x/ccv/provider/keeper/proposal.go index 2597770af9..a37c823483 100644 --- a/x/ccv/provider/keeper/proposal.go +++ b/x/ccv/provider/keeper/proposal.go @@ -20,37 +20,100 @@ import ( consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" ) -// CreateConsumerChainProposal will receive the consumer chain's client state from the proposal. +// HandleConsumerAdditionProposal will receive the consumer chain's client state from the proposal. // If the spawn time has already passed, then set the consumer chain. Otherwise store the client // as a pending client, and set once spawn time has passed. -func (k Keeper) CreateConsumerChainProposal(ctx sdk.Context, p *types.CreateConsumerChainProposal) error { +// +// Note: This method implements SpawnConsumerChainProposalHandler in spec. +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-spccprop1 +// Spec tag: [CCV-PCF-SPCCPROP.1] +func (k Keeper) HandleConsumerAdditionProposal(ctx sdk.Context, p *types.ConsumerAdditionProposal) error { if !ctx.BlockTime().Before(p.SpawnTime) { // lockUbdOnTimeout is set to be false, regardless of what the proposal says, until we can specify and test issues around this use case more thoroughly return k.CreateConsumerClient(ctx, p.ChainId, p.InitialHeight, false) } - err := k.SetPendingCreateProposal(ctx, p) + err := k.SetPendingConsumerAdditionProp(ctx, p) + if err != nil { + return err + } + + return nil +} + +// CreateConsumerClient will create the CCV client for the given consumer chain. The CCV channel must be built +// on top of the CCV client to ensure connection with the right consumer chain. 
+// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-crclient1 +// Spec tag: [CCV-PCF-CRCLIENT.1] +func (k Keeper) CreateConsumerClient(ctx sdk.Context, chainID string, initialHeight clienttypes.Height, lockUbdOnTimeout bool) error { + // check that a client for this chain does not exist + if _, found := k.GetConsumerClientId(ctx, chainID); found { + // drop the proposal + return nil + } + + // Use the unbonding period on the provider to compute the unbonding period on the consumer + unbondingPeriod := utils.ComputeConsumerUnbondingPeriod(k.stakingKeeper.UnbondingTime(ctx)) + + // Create client state by getting template client from parameters and filling in zeroed fields from proposal. + clientState := k.GetTemplateClient(ctx) + clientState.ChainId = chainID + clientState.LatestHeight = initialHeight + clientState.TrustingPeriod = unbondingPeriod / utils.TrustingPeriodFraction + clientState.UnbondingPeriod = unbondingPeriod + + // TODO: Allow for current validators to set different keys + consensusState := ibctmtypes.NewConsensusState( + ctx.BlockTime(), + commitmenttypes.NewMerkleRoot([]byte(ibctmtypes.SentinelRoot)), + ctx.BlockHeader().NextValidatorsHash, + ) + + clientID, err := k.clientKeeper.CreateClient(ctx, clientState, consensusState) + if err != nil { + return err + } + k.SetConsumerClientId(ctx, chainID, clientID) + + consumerGen, err := k.MakeConsumerGenesis(ctx) + if err != nil { + return err + } + err = k.SetConsumerGenesis(ctx, chainID, consumerGen) if err != nil { return err } + // store LockUnbondingOnTimeout flag + if lockUbdOnTimeout { + k.SetLockUnbondingOnTimeout(ctx, chainID) + } return nil } -// StopConsumerChainProposal stops a consumer chain and released the outstanding unbonding operations. +// HandleConsumerRemovalProposal stops a consumer chain and releases the outstanding unbonding operations. // If the stop time hasn't already passed, it stores the proposal as a pending proposal. -func (k Keeper) StopConsumerChainProposal(ctx sdk.Context, p *types.StopConsumerChainProposal) error { +// +// This method implements StopConsumerChainProposalHandler from spec. +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-stccprop1 +// Spec tag: [CCV-PCF-STCCPROP.1] +func (k Keeper) HandleConsumerRemovalProposal(ctx sdk.Context, p *types.ConsumerRemovalProposal) error { if !ctx.BlockTime().Before(p.StopTime) { return k.StopConsumerChain(ctx, p.ChainId, false, true) } - k.SetPendingStopProposal(ctx, p.ChainId, p.StopTime) + k.SetPendingConsumerRemovalProp(ctx, p.ChainId, p.StopTime) return nil } // StopConsumerChain cleans up the states for the given consumer chain ID and, if the given lockUbd is false, // it completes the outstanding unbonding operations lock by the consumer chain. +// +// This method implements StopConsumerChain from spec.
+// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-stcc1 +// Spec tag: [CCV-PCF-STCC.1] func (k Keeper) StopConsumerChain(ctx sdk.Context, chainID string, lockUbd, closeChan bool) (err error) { // check that a client for chainID exists if _, found := k.GetConsumerClientId(ctx, chainID); !found { @@ -60,6 +123,7 @@ func (k Keeper) StopConsumerChain(ctx sdk.Context, chainID string, lockUbd, clos // clean up states k.DeleteConsumerClientId(ctx, chainID) + k.DeleteConsumerGenesis(ctx, chainID) k.DeleteLockUnbondingOnTimeout(ctx, chainID) // close channel and delete the mappings between chain ID and channel ID @@ -71,11 +135,12 @@ func (k Keeper) StopConsumerChain(ctx sdk.Context, chainID string, lockUbd, clos k.DeleteChannelToChain(ctx, channelID) } - // TODO remove pending VSC packets once https://github.com/cosmos/interchain-security/issues/27 is fixed k.DeleteInitChainHeight(ctx, chainID) k.EmptySlashAcks(ctx, chainID) + k.EmptyPendingVSC(ctx, chainID) // release unbonding operations if they aren't locked + var vscIDs []uint64 if !lockUbd { // iterate over the consumer chain's unbonding operation VSC ids k.IterateOverUnbondingOpIndex(ctx, chainID, func(vscID uint64, ids []uint64) bool { @@ -105,63 +170,25 @@ func (k Keeper) StopConsumerChain(ctx sdk.Context, chainID string, lockUbd, clos if err := k.AppendMaturedUnbondingOps(ctx, maturedIds); err != nil { panic(fmt.Errorf("mature unbonding ops could not be appended: %w", err)) } - // clean up index - k.DeleteUnbondingOpIndex(ctx, chainID, vscID) + + vscIDs = append(vscIDs, vscID) return true }) } - if err != nil { - return err - } - - return nil -} - -// CreateConsumerClient will create the CCV client for the given consumer chain. The CCV channel must be built -// on top of the CCV client to ensure connection with the right consumer chain. -func (k Keeper) CreateConsumerClient(ctx sdk.Context, chainID string, initialHeight clienttypes.Height, lockUbdOnTimeout bool) error { - // check that a client for this chain does not exist - if _, found := k.GetConsumerClientId(ctx, chainID); found { - // drop the proposal - return nil - } - // Use the unbonding period on the provider to - // compute the unbonding period on the consumer - unbondingTime := utils.ComputeConsumerUnbondingPeriod(k.stakingKeeper.UnbondingTime(ctx)) - - // create clientstate by getting template client from parameters and filling in zeroed fields from proposal. 
- clientState := k.GetTemplateClient(ctx) - clientState.ChainId = chainID - clientState.LatestHeight = initialHeight - clientState.TrustingPeriod = unbondingTime / utils.TrustingPeriodFraction - clientState.UnbondingPeriod = unbondingTime - - // TODO: Allow for current validators to set different keys - consensusState := ibctmtypes.NewConsensusState(ctx.BlockTime(), commitmenttypes.NewMerkleRoot([]byte(ibctmtypes.SentinelRoot)), ctx.BlockHeader().NextValidatorsHash) - clientID, err := k.clientKeeper.CreateClient(ctx, clientState, consensusState) if err != nil { return err } - k.SetConsumerClientId(ctx, chainID, clientID) - consumerGen, err := k.MakeConsumerGenesis(ctx) - if err != nil { - return err + // clean up indexes + for _, id := range vscIDs { + k.DeleteUnbondingOpIndex(ctx, chainID, id) } - err = k.SetConsumerGenesis(ctx, chainID, consumerGen) - if err != nil { - return err - } - - // store LockUnbondingOnTimeout flag - if lockUbdOnTimeout { - k.SetLockUnbondingOnTimeout(ctx, chainID) - } return nil } +// MakeConsumerGenesis constructs a consumer genesis state. func (k Keeper) MakeConsumerGenesis(ctx sdk.Context) (gen consumertypes.GenesisState, err error) { unbondingTime := k.stakingKeeper.UnbondingTime(ctx) height := clienttypes.GetSelfHeight(ctx) @@ -220,40 +247,44 @@ func (k Keeper) MakeConsumerGenesis(ctx sdk.Context) (gen consumertypes.GenesisS return gen, nil } -// SetPendingCreateProposal stores a pending proposal to create a consumer chain client -func (k Keeper) SetPendingCreateProposal(ctx sdk.Context, clientInfo *types.CreateConsumerChainProposal) error { +// SetPendingConsumerAdditionProp stores a pending proposal to create a consumer chain client +func (k Keeper) SetPendingConsumerAdditionProp(ctx sdk.Context, clientInfo *types.ConsumerAdditionProposal) error { store := ctx.KVStore(k.storeKey) bz, err := k.cdc.Marshal(clientInfo) if err != nil { return err } - store.Set(types.PendingCreateProposalKey(clientInfo.SpawnTime, clientInfo.ChainId), bz) + store.Set(types.PendingCAPKey(clientInfo.SpawnTime, clientInfo.ChainId), bz) return nil } -// GetPendingCreateProposal retrieves a pending proposal to create a consumer chain client (by spawn time and chain id) -func (k Keeper) GetPendingCreateProposal(ctx sdk.Context, spawnTime time.Time, chainID string) types.CreateConsumerChainProposal { +// GetPendingConsumerAdditionProp retrieves a pending proposal to create a consumer chain client (by spawn time and chain id) +func (k Keeper) GetPendingConsumerAdditionProp(ctx sdk.Context, spawnTime time.Time, + chainID string) (prop types.ConsumerAdditionProposal, found bool) { store := ctx.KVStore(k.storeKey) - bz := store.Get(types.PendingCreateProposalKey(spawnTime, chainID)) + bz := store.Get(types.PendingCAPKey(spawnTime, chainID)) if len(bz) == 0 { - return types.CreateConsumerChainProposal{} + return prop, false } - var clientInfo types.CreateConsumerChainProposal - k.cdc.MustUnmarshal(bz, &clientInfo) + k.cdc.MustUnmarshal(bz, &prop) - return clientInfo + return prop, true } -func (k Keeper) PendingCreateProposalIterator(ctx sdk.Context) sdk.Iterator { +// PendingConsumerAdditionPropIterator returns an iterator for iterating through pending consumer addition proposals +func (k Keeper) PendingConsumerAdditionPropIterator(ctx sdk.Context) sdk.Iterator { store := ctx.KVStore(k.storeKey) - return sdk.KVStorePrefixIterator(store, []byte{types.PendingCreateProposalBytePrefix}) + return sdk.KVStorePrefixIterator(store, []byte{types.PendingCAPBytePrefix}) } -// 
IteratePendingCreateProposal iterates over the pending proposals to create consumer chain clients in order -// and creates the consumer client if the spawn time has passed. -func (k Keeper) IteratePendingCreateProposal(ctx sdk.Context) { - propsToExecute := k.CreateProposalsToExecute(ctx) +// BeginBlockInit iterates over the pending consumer addition proposals in order, and creates +// clients for props in which the spawn time has passed. Executed proposals are deleted. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-bblock-init1 +// Spec tag:[CCV-PCF-BBLOCK-INIT.1] +func (k Keeper) BeginBlockInit(ctx sdk.Context) { + propsToExecute := k.ConsumerAdditionPropsToExecute(ctx) for _, prop := range propsToExecute { // lockUbdOnTimeout is set to be false, regardless of what the proposal says, until we can specify and test issues around this use case more thoroughly @@ -263,86 +294,102 @@ func (k Keeper) IteratePendingCreateProposal(ctx sdk.Context) { } } // delete the executed proposals - k.DeletePendingCreateProposal(ctx, propsToExecute...) + k.DeletePendingConsumerAdditionProps(ctx, propsToExecute...) } -// CreateProposalsToExecute iterates over the pending proposals and returns an ordered list of proposals to be executed, -// ie. consumer clients to be created. A prop is included in the returned list if its proposed spawn time has passed. +// ConsumerAdditionPropsToExecute iterates over the pending consumer addition proposals +// and returns an ordered list of proposals to be executed, ie. consumer clients to be created. +// A prop is included in the returned list if its proposed spawn time has passed. // -// Note: this method is split out from IteratePendingCreateProposal to be easily unit tested. -func (k Keeper) CreateProposalsToExecute(ctx sdk.Context) []types.CreateConsumerChainProposal { +// Note: this method is split out from BeginBlockInit to be easily unit tested. +func (k Keeper) ConsumerAdditionPropsToExecute(ctx sdk.Context) []types.ConsumerAdditionProposal { // store the (to be) executed proposals in order - propsToExecute := []types.CreateConsumerChainProposal{} + propsToExecute := []types.ConsumerAdditionProposal{} - iterator := k.PendingCreateProposalIterator(ctx) + iterator := k.PendingConsumerAdditionPropIterator(ctx) defer iterator.Close() - if !iterator.Valid() { - return propsToExecute - } + k.IteratePendingConsumerAdditionProps(ctx, func(spawnTime time.Time, prop types.ConsumerAdditionProposal) bool { + if !ctx.BlockTime().Before(spawnTime) { + propsToExecute = append(propsToExecute, prop) + return true + } + return false + }) + + return propsToExecute +} + +func (k Keeper) IteratePendingConsumerAdditionProps(ctx sdk.Context, cb func(spawnTime time.Time, prop types.ConsumerAdditionProposal) bool) { + iterator := k.PendingConsumerAdditionPropIterator(ctx) + defer iterator.Close() for ; iterator.Valid(); iterator.Next() { key := iterator.Key() - spawnTime, _, err := types.ParsePendingCreateProposalKey(key) + spawnTime, _, err := types.ParsePendingCAPKey(key) if err != nil { panic(fmt.Errorf("failed to parse pending client key: %w", err)) } - var prop types.CreateConsumerChainProposal + var prop types.ConsumerAdditionProposal k.cdc.MustUnmarshal(iterator.Value(), &prop) - if !ctx.BlockTime().Before(spawnTime) { - propsToExecute = append(propsToExecute, prop) - } else { - // No more proposals to check, since they're stored/ordered by timestamp. 
- break + if !cb(spawnTime, prop) { + return } } - return propsToExecute } -// DeletePendingCreateProposal deletes the given create consumer proposals -func (k Keeper) DeletePendingCreateProposal(ctx sdk.Context, proposals ...types.CreateConsumerChainProposal) { +// DeletePendingConsumerAdditionProps deletes the given consumer addition proposals. +// This method should be called once the proposal has been acted upon. +func (k Keeper) DeletePendingConsumerAdditionProps(ctx sdk.Context, proposals ...types.ConsumerAdditionProposal) { store := ctx.KVStore(k.storeKey) for _, p := range proposals { - store.Delete(types.PendingCreateProposalKey(p.SpawnTime, p.ChainId)) + store.Delete(types.PendingCAPKey(p.SpawnTime, p.ChainId)) } } -// SetPendingStopProposal sets the consumer chain ID for the given timestamp -func (k Keeper) SetPendingStopProposal(ctx sdk.Context, chainID string, timestamp time.Time) { +// SetPendingConsumerRemovalProp stores a pending proposal to remove and stop a consumer chain +func (k Keeper) SetPendingConsumerRemovalProp(ctx sdk.Context, chainID string, timestamp time.Time) { store := ctx.KVStore(k.storeKey) - store.Set(types.PendingStopProposalKey(timestamp, chainID), []byte{}) + store.Set(types.PendingCRPKey(timestamp, chainID), []byte{}) } -// GetPendingStopProposal returns a boolean if a pending stop proposal exists for the given consumer chain ID and the timestamp -func (k Keeper) GetPendingStopProposal(ctx sdk.Context, chainID string, timestamp time.Time) bool { +// GetPendingConsumerRemovalProp returns a boolean if a pending consumer removal proposal +// exists for the given consumer chain ID and timestamp +func (k Keeper) GetPendingConsumerRemovalProp(ctx sdk.Context, chainID string, timestamp time.Time) bool { store := ctx.KVStore(k.storeKey) - bz := store.Get(types.PendingStopProposalKey(timestamp, chainID)) + bz := store.Get(types.PendingCRPKey(timestamp, chainID)) return bz != nil } -// DeletePendingStopProposals deletes the given stop proposals -func (k Keeper) DeletePendingStopProposals(ctx sdk.Context, proposals ...types.StopConsumerChainProposal) { +// DeletePendingConsumerRemovalProps deletes the given pending consumer removal proposals. +// This method should be called once the proposal has been acted upon. +func (k Keeper) DeletePendingConsumerRemovalProps(ctx sdk.Context, proposals ...types.ConsumerRemovalProposal) { store := ctx.KVStore(k.storeKey) for _, p := range proposals { - store.Delete(types.PendingStopProposalKey(p.StopTime, p.ChainId)) + store.Delete(types.PendingCRPKey(p.StopTime, p.ChainId)) } } -func (k Keeper) PendingStopProposalIterator(ctx sdk.Context) sdk.Iterator { +// PendingConsumerRemovalPropIterator returns an iterator for iterating through pending consumer removal proposals +func (k Keeper) PendingConsumerRemovalPropIterator(ctx sdk.Context) sdk.Iterator { store := ctx.KVStore(k.storeKey) - return sdk.KVStorePrefixIterator(store, []byte{types.PendingStopProposalBytePrefix}) + return sdk.KVStorePrefixIterator(store, []byte{types.PendingCRPBytePrefix}) } -// IteratePendingStopProposal iterates over the pending stop proposals in order and stop the chain if the stop time has passed, -// otherwise it will break out of loop and return. -func (k Keeper) IteratePendingStopProposal(ctx sdk.Context) { - propsToExecute := k.StopProposalsToExecute(ctx) +// BeginBlockCCR iterates over the pending consumer removal proposals +// in order and stops/removes the chain if the stop time has passed, +// otherwise it will break out of the loop and return.
Executed proposals are deleted. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-bblock-ccr1 +// Spec tag: [CCV-PCF-BBLOCK-CCR.1] +func (k Keeper) BeginBlockCCR(ctx sdk.Context) { + propsToExecute := k.ConsumerRemovalPropsToExecute(ctx) for _, prop := range propsToExecute { err := k.StopConsumerChain(ctx, prop.ChainId, false, true) @@ -351,42 +398,49 @@ func (k Keeper) IteratePendingStopProposal(ctx sdk.Context) { } } // delete the executed proposals - k.DeletePendingStopProposals(ctx, propsToExecute...) + k.DeletePendingConsumerRemovalProps(ctx, propsToExecute...) } -// StopProposalsToExecute iterates over the pending stop proposals and returns an ordered list of stop proposals to be executed, -// ie. consumer chains to stop. A prop is included in the returned list if its proposed stop time has passed. +// ConsumerRemovalPropsToExecute iterates over the pending consumer removal proposals +// and returns an ordered list of consumer removal proposals to be executed, +// ie. consumer chains to be stopped and removed from the provider chain. +// A prop is included in the returned list if its proposed stop time has passed. // -// Note: this method is split out from IteratePendingCreateProposal to be easily unit tested. -func (k Keeper) StopProposalsToExecute(ctx sdk.Context) []types.StopConsumerChainProposal { +// Note: this method is split out from BeginBlockCCR to be easily unit tested. +func (k Keeper) ConsumerRemovalPropsToExecute(ctx sdk.Context) []types.ConsumerRemovalProposal { - // store the (to be) executed stop proposals in order - propsToExecute := []types.StopConsumerChainProposal{} + // store the (to be) executed consumer removal proposals in order + propsToExecute := []types.ConsumerRemovalProposal{} - iterator := k.PendingStopProposalIterator(ctx) - defer iterator.Close() + k.IteratePendingConsumerRemovalProps(ctx, func(stopTime time.Time, prop types.ConsumerRemovalProposal) bool { + if !ctx.BlockTime().Before(stopTime) { + propsToExecute = append(propsToExecute, prop) + return true + } else { + // No more proposals to check, since they're stored/ordered by timestamp. + return false + } + }) - if !iterator.Valid() { - return propsToExecute - } + return propsToExecute +} + +func (k Keeper) IteratePendingConsumerRemovalProps(ctx sdk.Context, cb func(stopTime time.Time, prop types.ConsumerRemovalProposal) bool) { + iterator := k.PendingConsumerRemovalPropIterator(ctx) + defer iterator.Close() for ; iterator.Valid(); iterator.Next() { key := iterator.Key() - stopTime, chainID, err := types.ParsePendingStopProposalKey(key) + stopTime, chainID, err := types.ParsePendingCRPKey(key) if err != nil { - panic(fmt.Errorf("failed to parse pending stop proposal key: %w", err)) + panic(fmt.Errorf("failed to parse pending consumer removal proposal key: %w", err)) } - if !ctx.BlockTime().Before(stopTime) { - propsToExecute = append(propsToExecute, - types.StopConsumerChainProposal{ChainId: chainID, StopTime: stopTime}) - } else { - // No more proposals to check, since they're stored/ordered by timestamp. 
- break + if !cb(stopTime, types.ConsumerRemovalProposal{ChainId: chainID, StopTime: stopTime}) { + return } } - return propsToExecute } // CloseChannel closes the channel for the given channel ID on the condition diff --git a/x/ccv/provider/keeper/proposal_test.go b/x/ccv/provider/keeper/proposal_test.go index f62f246458..1e309e737b 100644 --- a/x/ccv/provider/keeper/proposal_test.go +++ b/x/ccv/provider/keeper/proposal_test.go @@ -1,207 +1,774 @@ package keeper_test import ( + "encoding/json" "testing" "time" + _go "github.com/confio/ics23/go" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" + ibctmtypes "github.com/cosmos/ibc-go/v3/modules/light-clients/07-tendermint/types" + "github.com/golang/mock/gomock" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/stretchr/testify/require" testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + providerkeeper "github.com/cosmos/interchain-security/x/ccv/provider/keeper" "github.com/cosmos/interchain-security/x/ccv/provider/types" + providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types" ) -func TestPendingStopProposalDeletion(t *testing.T) { +// +// Initialization sub-protocol related tests of proposal.go +// + +// Tests the HandleConsumerAdditionProposal method against the SpawnConsumerChainProposalHandler spec. +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-spccprop1 +// Spec tag: [CCV-PCF-SPCCPROP.1] +func TestHandleConsumerAdditionProposal(t *testing.T) { + + type testCase struct { + description string + prop *providertypes.ConsumerAdditionProposal + // Time when prop is handled + blockTime time.Time + // Whether it's expected that the spawn time has passed and client should be created + expCreatedClient bool + } + + // Snapshot times asserted in tests + now := time.Now().UTC() + hourFromNow := now.Add(time.Hour).UTC() + + tests := []testCase{ + { + description: "ctx block time is after proposal's spawn time, expected that client is created", + prop: providertypes.NewConsumerAdditionProposal( + "title", + "description", + "chainID", + clienttypes.NewHeight(2, 3), + []byte("gen_hash"), + []byte("bin_hash"), + now, // Spawn time + ).(*providertypes.ConsumerAdditionProposal), + blockTime: hourFromNow, + expCreatedClient: true, + }, + { + description: `ctx block time is before proposal's spawn time, + expected that no client is created and the proposal is persisted as pending`, + prop: providertypes.NewConsumerAdditionProposal( + "title", + "description", + "chainID", + clienttypes.NewHeight(2, 3), + []byte("gen_hash"), + []byte("bin_hash"), + hourFromNow, // Spawn time + ).(*types.ConsumerAdditionProposal), + blockTime: now, + expCreatedClient: false, + }, + } + + for _, tc := range tests { + // Common setup + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + ctx = ctx.WithBlockTime(tc.blockTime) + + if tc.expCreatedClient { + // Mock calls are only asserted if we expect a client to be created. + gomock.InOrder( + testkeeper.GetMocksForCreateConsumerClient(ctx, &mocks, "chainID", clienttypes.NewHeight(2, 3))..., + ) + } + + tc.prop.LockUnbondingOnTimeout = false // Full functionality not implemented yet. 
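The cases above reduce to one comparison between the block time and the proposal's spawn time, the same !ctx.BlockTime().Before(...) comparison visible in ConsumerRemovalPropsToExecute earlier in this diff. A minimal sketch of that gate (the helper name is hypothetical, not part of this changeset) also shows why the boundary is inclusive: a proposal whose spawn time equals the block time is executed immediately rather than stored as pending.

package main

import (
	"fmt"
	"time"
)

// spawnTimeReached is a hypothetical helper mirroring the
// !blockTime.Before(spawnTime) check: true means handle the proposal now,
// false means persist it as pending.
func spawnTimeReached(blockTime, spawnTime time.Time) bool {
	return !blockTime.Before(spawnTime)
}

func main() {
	now := time.Now().UTC()
	fmt.Println(spawnTimeReached(now.Add(time.Hour), now)) // true: spawn time already passed
	fmt.Println(spawnTimeReached(now, now))                // true: the boundary is inclusive
	fmt.Println(spawnTimeReached(now, now.Add(time.Hour))) // false: proposal stays pending
}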
+ + err := providerKeeper.HandleConsumerAdditionProposal(ctx, tc.prop) + require.NoError(t, err) + + if tc.expCreatedClient { + testCreatedConsumerClient(t, ctx, providerKeeper, tc.prop.ChainId, "clientID") + } else { + // check that stored pending prop is exactly the same as the initially instantiated prop + gotProposal, found := providerKeeper.GetPendingConsumerAdditionProp(ctx, tc.prop.SpawnTime, tc.prop.ChainId) + require.True(t, found) + require.Equal(t, *tc.prop, gotProposal) + // double check that a client for this chain does not exist + _, found = providerKeeper.GetConsumerClientId(ctx, tc.prop.ChainId) + require.False(t, found) + } + ctrl.Finish() + } +} + +// Tests the CreateConsumerClient method against the spec, +// with more granularity than what's covered in TestHandleCreateConsumerChainProposal. +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-crclient1 +// Spec tag: [CCV-PCF-CRCLIENT.1] +func TestCreateConsumerClient(t *testing.T) { + + type testCase struct { + description string + // Any state-mutating setup on keeper and expected mock calls, specific to this test case + setup func(*providerkeeper.Keeper, sdk.Context, *testkeeper.MockedKeepers) + // Whether a client should be created + expClientCreated bool + } + tests := []testCase{ + { + description: "No state mutation, new client should be created", + setup: func(providerKeeper *providerkeeper.Keeper, ctx sdk.Context, mocks *testkeeper.MockedKeepers) { + + // Valid client creation is asserted with mock expectations here + gomock.InOrder( + testkeeper.GetMocksForCreateConsumerClient(ctx, mocks, "chainID", clienttypes.NewHeight(4, 5))..., + ) + }, + expClientCreated: true, + }, + { + description: "client for this chain already exists, new one is not created", + setup: func(providerKeeper *providerkeeper.Keeper, ctx sdk.Context, mocks *testkeeper.MockedKeepers) { + + providerKeeper.SetConsumerClientId(ctx, "chainID", "clientID") + + // Expect none of the client creation related calls to happen + mocks.MockStakingKeeper.EXPECT().UnbondingTime(gomock.Any()).Times(0) + mocks.MockClientKeeper.EXPECT().CreateClient(gomock.Any(), gomock.Any(), gomock.Any()).Times(0) + mocks.MockClientKeeper.EXPECT().GetSelfConsensusState(gomock.Any(), gomock.Any()).Times(0) + mocks.MockStakingKeeper.EXPECT().IterateLastValidatorPowers(gomock.Any(), gomock.Any()).Times(0) + + }, + expClientCreated: false, + }, + } + + for _, tc := range tests { + // Common setup + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + + // Test specific setup + tc.setup(&providerKeeper, ctx, &mocks) + + // Call method with same arbitrary values as defined above in mock expectations. + err := providerKeeper.CreateConsumerClient( + ctx, "chainID", clienttypes.NewHeight(4, 5), false) // LockUbdOnTimeout always false for now + + require.NoError(t, err) + + if tc.expClientCreated { + testCreatedConsumerClient(t, ctx, providerKeeper, "chainID", "clientID") + } + + // Assert mock calls from setup functions + ctrl.Finish() + } +} + +// Executes test assertions for a created consumer client. +// +// Note: Separated from TestCreateConsumerClient to also be called from TestCreateConsumerChainProposal. +func testCreatedConsumerClient(t *testing.T, + ctx sdk.Context, providerKeeper providerkeeper.Keeper, expectedChainID string, expectedClientID string) { + + // ClientID should be stored. 
+ clientId, found := providerKeeper.GetConsumerClientId(ctx, expectedChainID) + require.True(t, found, "consumer client not found") + require.Equal(t, expectedClientID, clientId) + + // Lock unbonding on timeout flag always false for now. + lockUbdOnTimeout := providerKeeper.GetLockUnbondingOnTimeout(ctx, expectedChainID) + require.False(t, lockUbdOnTimeout) + + // Only assert that consumer genesis was set, + // more granular tests on consumer genesis should be defined in TestMakeConsumerGenesis + _, ok := providerKeeper.GetConsumerGenesis(ctx, expectedChainID) + require.True(t, ok) +} + +// TestPendingConsumerAdditionPropDeletion tests the getting/setting +// and deletion keeper methods for pending consumer addition props +func TestPendingConsumerAdditionPropDeletion(t *testing.T) { testCases := []struct { - types.StopConsumerChainProposal + types.ConsumerAdditionProposal ExpDeleted bool }{ { - StopConsumerChainProposal: types.StopConsumerChainProposal{ChainId: "8", StopTime: time.Now().UTC()}, - ExpDeleted: true, + ConsumerAdditionProposal: types.ConsumerAdditionProposal{ChainId: "0", SpawnTime: time.Now().UTC()}, + ExpDeleted: true, }, { - StopConsumerChainProposal: types.StopConsumerChainProposal{ChainId: "9", StopTime: time.Now().UTC().Add(time.Hour)}, - ExpDeleted: false, + ConsumerAdditionProposal: types.ConsumerAdditionProposal{ChainId: "1", SpawnTime: time.Now().UTC().Add(time.Hour)}, + ExpDeleted: false, }, } - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() for _, tc := range testCases { - providerKeeper.SetPendingStopProposal(ctx, tc.ChainId, tc.StopTime) + err := providerKeeper.SetPendingConsumerAdditionProp(ctx, &tc.ConsumerAdditionProposal) + require.NoError(t, err) } ctx = ctx.WithBlockTime(time.Now().UTC()) - propsToExecute := providerKeeper.StopProposalsToExecute(ctx) - // Delete stop proposals, same as what would be done by IteratePendingStopProposal - providerKeeper.DeletePendingStopProposals(ctx, propsToExecute...) + propsToExecute := providerKeeper.ConsumerAdditionPropsToExecute(ctx) + // Delete consumer addition proposals, same as what would be done by BeginBlockInit + providerKeeper.DeletePendingConsumerAdditionProps(ctx, propsToExecute...) 
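ConsumerAdditionPropsToExecute returns only the due proposals, already ordered by spawn time, because pending proposals are stored under keys that sort chronologically (the iteration callbacks above note they are "stored/ordered by timestamp"). The sketch below is illustrative only: the prefix byte and time encoding are assumptions, not the real PendingCAPKey/PendingCRPKey layout, but it demonstrates the property the ordering tests rely on, namely that lexicographic key order equals timestamp order.

package main

import (
	"encoding/binary"
	"fmt"
	"sort"
	"time"
)

// pendingPropKey builds prefix || big-endian unix-nano(ts) || chainID.
// The 0x11 prefix and the time encoding are hypothetical.
func pendingPropKey(ts time.Time, chainID string) []byte {
	key := []byte{0x11}
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(ts.UnixNano()))
	key = append(key, buf...)
	return append(key, chainID...)
}

func main() {
	now := time.Now().UTC()
	keys := [][]byte{
		pendingPropKey(now.Add(2*time.Hour), "chain3"),
		pendingPropKey(now, "chain1"),
		pendingPropKey(now.Add(time.Hour), "chain2"),
	}
	// A KV store iterator walks keys in byte order; sorting stands in for that here.
	sort.Slice(keys, func(i, j int) bool { return string(keys[i]) < string(keys[j]) })
	for _, k := range keys {
		fmt.Println(string(k[9:])) // chain1, chain2, chain3, regardless of insertion order
	}
}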
numDeleted := 0 for _, tc := range testCases { - res := providerKeeper.GetPendingStopProposal(ctx, tc.ChainId, tc.StopTime) + res, found := providerKeeper.GetPendingConsumerAdditionProp(ctx, tc.SpawnTime, tc.ChainId) if !tc.ExpDeleted { - require.NotEmpty(t, res, "stop proposal was deleted: %s %s", tc.ChainId, tc.StopTime.String()) + require.True(t, found) + require.NotEmpty(t, res, "consumer addition proposal was deleted: %s %s", tc.ChainId, tc.SpawnTime.String()) continue } - require.Empty(t, res, "stop proposal was not deleted %s %s", tc.ChainId, tc.StopTime.String()) + require.Empty(t, res, "consumer addition proposal was not deleted %s %s", tc.ChainId, tc.SpawnTime.String()) require.Equal(t, propsToExecute[numDeleted].ChainId, tc.ChainId) numDeleted += 1 } } -// Tests that pending stop proposals are accessed in order by timestamp via the iterator -func TestPendingStopProposalsOrder(t *testing.T) { +// TestPendingConsumerAdditionPropOrder tests that pending consumer addition proposals +// are accessed in order by timestamp via the iterator +func TestPendingConsumerAdditionPropOrder(t *testing.T) { now := time.Now().UTC() // props with unique chain ids and spawn times - sampleProp1 := types.StopConsumerChainProposal{ChainId: "1", StopTime: now} - sampleProp2 := types.StopConsumerChainProposal{ChainId: "2", StopTime: now.Add(1 * time.Hour)} - sampleProp3 := types.StopConsumerChainProposal{ChainId: "3", StopTime: now.Add(2 * time.Hour)} - sampleProp4 := types.StopConsumerChainProposal{ChainId: "4", StopTime: now.Add(3 * time.Hour)} - sampleProp5 := types.StopConsumerChainProposal{ChainId: "5", StopTime: now.Add(4 * time.Hour)} + sampleProp1 := types.ConsumerAdditionProposal{ChainId: "1", SpawnTime: now} + sampleProp2 := types.ConsumerAdditionProposal{ChainId: "2", SpawnTime: now.Add(1 * time.Hour)} + sampleProp3 := types.ConsumerAdditionProposal{ChainId: "3", SpawnTime: now.Add(2 * time.Hour)} + sampleProp4 := types.ConsumerAdditionProposal{ChainId: "4", SpawnTime: now.Add(3 * time.Hour)} + sampleProp5 := types.ConsumerAdditionProposal{ChainId: "5", SpawnTime: now.Add(4 * time.Hour)} testCases := []struct { - propSubmitOrder []types.StopConsumerChainProposal + propSubmitOrder []types.ConsumerAdditionProposal accessTime time.Time - expectedOrderedProps []types.StopConsumerChainProposal + expectedOrderedProps []types.ConsumerAdditionProposal }{ { - propSubmitOrder: []types.StopConsumerChainProposal{ + propSubmitOrder: []types.ConsumerAdditionProposal{ sampleProp1, sampleProp2, sampleProp3, sampleProp4, sampleProp5, }, accessTime: now.Add(30 * time.Minute), - expectedOrderedProps: []types.StopConsumerChainProposal{ + expectedOrderedProps: []types.ConsumerAdditionProposal{ sampleProp1, }, }, { - propSubmitOrder: []types.StopConsumerChainProposal{ + propSubmitOrder: []types.ConsumerAdditionProposal{ sampleProp3, sampleProp2, sampleProp1, sampleProp5, sampleProp4, }, accessTime: now.Add(3 * time.Hour).Add(30 * time.Minute), - expectedOrderedProps: []types.StopConsumerChainProposal{ + expectedOrderedProps: []types.ConsumerAdditionProposal{ sampleProp1, sampleProp2, sampleProp3, sampleProp4, }, }, { - propSubmitOrder: []types.StopConsumerChainProposal{ + propSubmitOrder: []types.ConsumerAdditionProposal{ sampleProp5, sampleProp4, sampleProp3, sampleProp2, sampleProp1, }, accessTime: now.Add(5 * time.Hour), - expectedOrderedProps: []types.StopConsumerChainProposal{ + expectedOrderedProps: []types.ConsumerAdditionProposal{ sampleProp1, sampleProp2, sampleProp3, sampleProp4, sampleProp5, }, }, } for 
_, tc := range testCases { - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() + ctx = ctx.WithBlockTime(tc.accessTime) for _, prop := range tc.propSubmitOrder { - providerKeeper.SetPendingStopProposal(ctx, prop.ChainId, prop.StopTime) + err := providerKeeper.SetPendingConsumerAdditionProp(ctx, &prop) + require.NoError(t, err) } - propsToExecute := providerKeeper.StopProposalsToExecute(ctx) + propsToExecute := providerKeeper.ConsumerAdditionPropsToExecute(ctx) require.Equal(t, tc.expectedOrderedProps, propsToExecute) } } -func TestPendingCreateProposalsDeletion(t *testing.T) { +// +// Consumer Chain Removal sub-protocol related tests of proposal.go +// + +// TestHandleConsumerRemovalProposal tests HandleConsumerRemovalProposal against its corresponding spec method. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-stccprop1 +// Spec tag: [CCV-PCF-STCCPROP.1] +func TestHandleConsumerRemovalProposal(t *testing.T) { + + type testCase struct { + description string + // Consumer removal proposal to handle + prop *types.ConsumerRemovalProposal + // Time when prop is handled + blockTime time.Time + // Whether consumer chain should have been stopped + expStop bool + } + + // Snapshot times asserted in tests + now := time.Now().UTC() + hourFromNow := now.Add(time.Hour).UTC() + + tests := []testCase{ + { + description: "valid proposal: stop time reached", + prop: providertypes.NewConsumerRemovalProposal( + "title", + "description", + "chainID", + now, + ).(*providertypes.ConsumerRemovalProposal), + blockTime: hourFromNow, // After stop time. + expStop: true, + }, + { + description: "valid proposal: stop time has not yet been reached", + prop: providertypes.NewConsumerRemovalProposal( + "title", + "description", + "chainID", + hourFromNow, + ).(*providertypes.ConsumerRemovalProposal), + blockTime: now, // Before proposal's stop time + expStop: false, + }, + } + + for _, tc := range tests { + + // Common setup + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + ctx = ctx.WithBlockTime(tc.blockTime) + + // Mock expectations and setup for stopping the consumer chain, if applicable + if tc.expStop { + testkeeper.SetupForStoppingConsumerChain(t, ctx, &providerKeeper, mocks) + } + // Note: when expStop is false, no mocks are setup, + // meaning no external keeper methods are allowed to be called. + + err := providerKeeper.HandleConsumerRemovalProposal(ctx, tc.prop) + require.NoError(t, err) + + if tc.expStop { + // Expect no pending proposal to exist + found := providerKeeper.GetPendingConsumerRemovalProp(ctx, tc.prop.ChainId, tc.prop.StopTime) + require.False(t, found) + + testConsumerStateIsCleaned(t, ctx, providerKeeper, tc.prop.ChainId, "channelID") + } else { + // Proposal should be stored as pending + found := providerKeeper.GetPendingConsumerRemovalProp(ctx, tc.prop.ChainId, tc.prop.StopTime) + require.True(t, found) + } + + // Assert mock calls from setup function + ctrl.Finish() + } +} + +// Tests the StopConsumerChain method against the spec, +// with more granularity than what's covered in TestHandleConsumerRemovalProposal, or e2e tests. 
+// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-stcc1 +// Spec tag: [CCV-PCF-STCC.1] +func TestStopConsumerChain(t *testing.T) { + type testCase struct { + description string + // State-mutating setup specific to this test case + setup func(sdk.Context, *providerkeeper.Keeper, testkeeper.MockedKeepers) + // Whether we should expect the method to return an error + expErr bool + } + + tests := []testCase{ + { + description: "fail due to an invalid unbonding index", + setup: func(ctx sdk.Context, providerKeeper *providerkeeper.Keeper, mocks testkeeper.MockedKeepers) { + // set invalid unbonding op index + providerKeeper.SetUnbondingOpIndex(ctx, "chainID", 0, []uint64{0}) + + // StopConsumerChain should return error, but state is still cleaned (asserted with mocks). + testkeeper.SetupForStoppingConsumerChain(t, ctx, providerKeeper, mocks) + }, + expErr: true, + }, + { + description: "proposal dropped, client doesn't exist", + setup: func(ctx sdk.Context, providerKeeper *providerkeeper.Keeper, mocks testkeeper.MockedKeepers) { + // No mocks, meaning no external keeper methods are allowed to be called. + }, + expErr: false, + }, + { + description: "valid stop of consumer chain, all mock calls hit", + setup: func(ctx sdk.Context, providerKeeper *providerkeeper.Keeper, mocks testkeeper.MockedKeepers) { + testkeeper.SetupForStoppingConsumerChain(t, ctx, providerKeeper, mocks) + }, + expErr: false, + }, + } + + for _, tc := range tests { + + // Common setup + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + + // Setup specific to test case + tc.setup(ctx, &providerKeeper, mocks) + + err := providerKeeper.StopConsumerChain(ctx, "chainID", false, true) + + if tc.expErr { + require.Error(t, err) + } else { + require.NoError(t, err) + } + + testConsumerStateIsCleaned(t, ctx, providerKeeper, "chainID", "channelID") + + ctrl.Finish() + } +} + +// testConsumerStateIsCleaned executes test assertions for a stopped consumer chain's state being cleaned. 
+func testConsumerStateIsCleaned(t *testing.T, ctx sdk.Context, providerKeeper providerkeeper.Keeper, + expectedChainID string, expectedChannelID string) { + + _, found := providerKeeper.GetConsumerClientId(ctx, expectedChainID) + require.False(t, found) + found = providerKeeper.GetLockUnbondingOnTimeout(ctx, expectedChainID) + require.False(t, found) + _, found = providerKeeper.GetChainToChannel(ctx, expectedChainID) + require.False(t, found) + _, found = providerKeeper.GetChannelToChain(ctx, expectedChannelID) + require.False(t, found) + _, found = providerKeeper.GetInitChainHeight(ctx, expectedChainID) + require.False(t, found) + acks := providerKeeper.GetSlashAcks(ctx, expectedChainID) + require.Empty(t, acks) +} + +// TestPendingConsumerRemovalPropDeletion tests the getting/setting +// and deletion methods for pending consumer removal props +func TestPendingConsumerRemovalPropDeletion(t *testing.T) { testCases := []struct { - types.CreateConsumerChainProposal + types.ConsumerRemovalProposal ExpDeleted bool }{ { - CreateConsumerChainProposal: types.CreateConsumerChainProposal{ChainId: "0", SpawnTime: time.Now().UTC()}, - ExpDeleted: true, + ConsumerRemovalProposal: types.ConsumerRemovalProposal{ChainId: "8", StopTime: time.Now().UTC()}, + ExpDeleted: true, }, { - CreateConsumerChainProposal: types.CreateConsumerChainProposal{ChainId: "1", SpawnTime: time.Now().UTC().Add(time.Hour)}, - ExpDeleted: false, + ConsumerRemovalProposal: types.ConsumerRemovalProposal{ChainId: "9", StopTime: time.Now().UTC().Add(time.Hour)}, + ExpDeleted: false, }, } - providerKeeper, ctx := testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() for _, tc := range testCases { - err := providerKeeper.SetPendingCreateProposal(ctx, &tc.CreateConsumerChainProposal) - require.NoError(t, err) + providerKeeper.SetPendingConsumerRemovalProp(ctx, tc.ChainId, tc.StopTime) } ctx = ctx.WithBlockTime(time.Now().UTC()) - propsToExecute := providerKeeper.CreateProposalsToExecute(ctx) - // Delete create proposals, same as what would be done by IteratePendingCreateProposal - providerKeeper.DeletePendingCreateProposal(ctx, propsToExecute...) + propsToExecute := providerKeeper.ConsumerRemovalPropsToExecute(ctx) + // Delete consumer removal proposals, same as what would be done by BeginBlockCCR + providerKeeper.DeletePendingConsumerRemovalProps(ctx, propsToExecute...) 
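testConsumerStateIsCleaned above checks each per-chain index individually: stopping a consumer chain must delete every mapping that was created for it. The sketch below models that invariant with plain maps; the field names are hypothetical and only loosely mirror the keeper's stores.

package main

import "fmt"

// providerStateSketch is a toy stand-in for the keeper's per-chain indexes.
type providerStateSketch struct {
	clientIDs      map[string]string   // chainID -> clientID
	chainToChannel map[string]string   // chainID -> channelID
	channelToChain map[string]string   // channelID -> chainID
	initHeights    map[string]uint64   // chainID -> init chain height
	slashAcks      map[string][]string // chainID -> slash acks
}

// removeConsumerChain deletes every entry keyed by the chain (or its channel),
// which is exactly what the cleanup assertions above verify.
func (s *providerStateSketch) removeConsumerChain(chainID string) {
	if channelID, ok := s.chainToChannel[chainID]; ok {
		delete(s.channelToChain, channelID)
	}
	delete(s.chainToChannel, chainID)
	delete(s.clientIDs, chainID)
	delete(s.initHeights, chainID)
	delete(s.slashAcks, chainID)
}

func main() {
	s := &providerStateSketch{
		clientIDs:      map[string]string{"chainID": "clientID"},
		chainToChannel: map[string]string{"chainID": "channelID"},
		channelToChain: map[string]string{"channelID": "chainID"},
		initHeights:    map[string]uint64{"chainID": 5},
		slashAcks:      map[string][]string{"chainID": {"ack"}},
	}
	s.removeConsumerChain("chainID")
	fmt.Println(len(s.clientIDs), len(s.chainToChannel), len(s.channelToChain),
		len(s.initHeights), len(s.slashAcks)) // 0 0 0 0 0
}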
numDeleted := 0 for _, tc := range testCases { - res := providerKeeper.GetPendingCreateProposal(ctx, tc.SpawnTime, tc.ChainId) + res := providerKeeper.GetPendingConsumerRemovalProp(ctx, tc.ChainId, tc.StopTime) if !tc.ExpDeleted { - require.NotEmpty(t, res, "create proposal was deleted: %s %s", tc.ChainId, tc.SpawnTime.String()) + require.NotEmpty(t, res, "consumer removal prop was deleted: %s %s", tc.ChainId, tc.StopTime.String()) continue } - require.Empty(t, res, "create proposal was not deleted %s %s", tc.ChainId, tc.SpawnTime.String()) + require.Empty(t, res, "consumer removal prop was not deleted %s %s", tc.ChainId, tc.StopTime.String()) require.Equal(t, propsToExecute[numDeleted].ChainId, tc.ChainId) numDeleted += 1 } } -// Tests that pending create proposals are accessed in order by timestamp via the iterator -func TestPendingCreateProposalsOrder(t *testing.T) { +// Tests that pending consumer removal proposals are accessed in order by timestamp via the iterator +func TestPendingConsumerRemovalPropOrder(t *testing.T) { now := time.Now().UTC() // props with unique chain ids and spawn times - sampleProp1 := types.CreateConsumerChainProposal{ChainId: "1", SpawnTime: now} - sampleProp2 := types.CreateConsumerChainProposal{ChainId: "2", SpawnTime: now.Add(1 * time.Hour)} - sampleProp3 := types.CreateConsumerChainProposal{ChainId: "3", SpawnTime: now.Add(2 * time.Hour)} - sampleProp4 := types.CreateConsumerChainProposal{ChainId: "4", SpawnTime: now.Add(3 * time.Hour)} - sampleProp5 := types.CreateConsumerChainProposal{ChainId: "5", SpawnTime: now.Add(4 * time.Hour)} + sampleProp1 := types.ConsumerRemovalProposal{ChainId: "1", StopTime: now} + sampleProp2 := types.ConsumerRemovalProposal{ChainId: "2", StopTime: now.Add(1 * time.Hour)} + sampleProp3 := types.ConsumerRemovalProposal{ChainId: "3", StopTime: now.Add(2 * time.Hour)} + sampleProp4 := types.ConsumerRemovalProposal{ChainId: "4", StopTime: now.Add(3 * time.Hour)} + sampleProp5 := types.ConsumerRemovalProposal{ChainId: "5", StopTime: now.Add(4 * time.Hour)} testCases := []struct { - propSubmitOrder []types.CreateConsumerChainProposal + propSubmitOrder []types.ConsumerRemovalProposal accessTime time.Time - expectedOrderedProps []types.CreateConsumerChainProposal + expectedOrderedProps []types.ConsumerRemovalProposal }{ { - propSubmitOrder: []types.CreateConsumerChainProposal{ + propSubmitOrder: []types.ConsumerRemovalProposal{ sampleProp1, sampleProp2, sampleProp3, sampleProp4, sampleProp5, }, accessTime: now.Add(30 * time.Minute), - expectedOrderedProps: []types.CreateConsumerChainProposal{ + expectedOrderedProps: []types.ConsumerRemovalProposal{ sampleProp1, }, }, { - propSubmitOrder: []types.CreateConsumerChainProposal{ + propSubmitOrder: []types.ConsumerRemovalProposal{ sampleProp3, sampleProp2, sampleProp1, sampleProp5, sampleProp4, }, accessTime: now.Add(3 * time.Hour).Add(30 * time.Minute), - expectedOrderedProps: []types.CreateConsumerChainProposal{ + expectedOrderedProps: []types.ConsumerRemovalProposal{ sampleProp1, sampleProp2, sampleProp3, sampleProp4, }, }, { - propSubmitOrder: []types.CreateConsumerChainProposal{ + propSubmitOrder: []types.ConsumerRemovalProposal{ sampleProp5, sampleProp4, sampleProp3, sampleProp2, sampleProp1, }, accessTime: now.Add(5 * time.Hour), - expectedOrderedProps: []types.CreateConsumerChainProposal{ + expectedOrderedProps: []types.ConsumerRemovalProposal{ sampleProp1, sampleProp2, sampleProp3, sampleProp4, sampleProp5, }, }, } for _, tc := range testCases { - providerKeeper, ctx := 
testkeeper.GetProviderKeeperAndCtx(t) + providerKeeper, ctx, ctrl, _ := testkeeper.GetProviderKeeperAndCtx(t, testkeeper.NewInMemKeeperParams(t)) + defer ctrl.Finish() ctx = ctx.WithBlockTime(tc.accessTime) for _, prop := range tc.propSubmitOrder { - err := providerKeeper.SetPendingCreateProposal(ctx, &prop) - require.NoError(t, err) + providerKeeper.SetPendingConsumerRemovalProp(ctx, prop.ChainId, prop.StopTime) } - propsToExecute := providerKeeper.CreateProposalsToExecute(ctx) + propsToExecute := providerKeeper.ConsumerRemovalPropsToExecute(ctx) require.Equal(t, tc.expectedOrderedProps, propsToExecute) } } + +// TestMakeConsumerGenesis tests the MakeConsumerGenesis keeper method +// +// Note: the initial intention of this test wasn't very clear, it was migrated with best effort +func TestMakeConsumerGenesis(t *testing.T) { + + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState( + &ibctmtypes.ClientState{ + TrustLevel: ibctmtypes.DefaultTrustLevel, + MaxClockDrift: 10000000000, + ProofSpecs: []*_go.ProofSpec{ + { + LeafSpec: &_go.LeafOp{ + Hash: _go.HashOp_SHA256, + PrehashKey: _go.HashOp_NO_HASH, + PrehashValue: _go.HashOp_SHA256, + Length: _go.LengthOp_VAR_PROTO, + Prefix: []byte{0x00}, + }, + InnerSpec: &_go.InnerSpec{ + ChildOrder: []int32{0, 1}, + ChildSize: 33, + MinPrefixLength: 4, + MaxPrefixLength: 12, + Hash: _go.HashOp_SHA256, + }, + MaxDepth: 0, + MinDepth: 0, + }, + { + LeafSpec: &_go.LeafOp{ + Hash: _go.HashOp_SHA256, + PrehashKey: _go.HashOp_NO_HASH, + PrehashValue: _go.HashOp_SHA256, + Length: _go.LengthOp_VAR_PROTO, + Prefix: []byte{0x00}, + }, + InnerSpec: &_go.InnerSpec{ + ChildOrder: []int32{0, 1}, + ChildSize: 32, + MinPrefixLength: 1, + MaxPrefixLength: 1, + Hash: _go.HashOp_SHA256, + }, + MaxDepth: 0, + }, + }, + UpgradePath: []string{"upgrade", "upgradedIBCState"}, + AllowUpdateAfterExpiry: true, + AllowUpdateAfterMisbehaviour: true, + }, + ) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() + + // + // Other setup not covered by custom template client state + // + ctx = ctx.WithChainID("testchain1") // chainID is obtained from ctx + ctx = ctx.WithBlockHeight(5) // RevisionHeight obtained from ctx + gomock.InOrder(testkeeper.GetMocksForMakeConsumerGenesis(ctx, &mocks, 1814400000000000)...) 
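The fixture comparison that follows unmarshals an expected JSON document into the same type as the generated genesis, zeroes the fields that are impractical to reproduce, and compares the rest. A stripped-down sketch of that pattern, with a hypothetical struct standing in for the consumer GenesisState:

package main

import (
	"encoding/json"
	"fmt"
	"reflect"
	"time"
)

// genesisSketch is a hypothetical stand-in for the genesis type under test.
type genesisSketch struct {
	ChainID   string    `json:"chain_id"`
	Enabled   bool      `json:"enabled"`
	CreatedAt time.Time `json:"created_at"`
}

func main() {
	actual := genesisSketch{ChainID: "testchain1", Enabled: true, CreatedAt: time.Now()}

	var expected genesisSketch
	fixture := `{"chain_id":"testchain1","enabled":true,"created_at":"2020-01-02T00:00:10Z"}`
	if err := json.Unmarshal([]byte(fixture), &expected); err != nil {
		panic(err)
	}

	// Zero the field that cannot be pinned down, mirroring how the test below
	// zeroes InitialValSet and ProviderConsensusState before comparing.
	actual.CreatedAt = time.Time{}
	expected.CreatedAt = time.Time{}

	fmt.Println(reflect.DeepEqual(actual, expected)) // true
}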
+ + actualGenesis, err := providerKeeper.MakeConsumerGenesis(ctx) + require.NoError(t, err) + + jsonString := `{"params":{"enabled":true, "blocks_per_distribution_transmission":1000, "lock_unbonding_on_timeout": false},"new_chain":true,"provider_client_state":{"chain_id":"testchain1","trust_level":{"numerator":1,"denominator":3},"trusting_period":907200000000000,"unbonding_period":1814400000000000,"max_clock_drift":10000000000,"frozen_height":{},"latest_height":{"revision_height":5},"proof_specs":[{"leaf_spec":{"hash":1,"prehash_value":1,"length":1,"prefix":"AA=="},"inner_spec":{"child_order":[0,1],"child_size":33,"min_prefix_length":4,"max_prefix_length":12,"hash":1}},{"leaf_spec":{"hash":1,"prehash_value":1,"length":1,"prefix":"AA=="},"inner_spec":{"child_order":[0,1],"child_size":32,"min_prefix_length":1,"max_prefix_length":1,"hash":1}}],"upgrade_path":["upgrade","upgradedIBCState"],"allow_update_after_expiry":true,"allow_update_after_misbehaviour":true},"provider_consensus_state":{"timestamp":"2020-01-02T00:00:10Z","root":{"hash":"LpGpeyQVLUo9HpdsgJr12NP2eCICspcULiWa5u9udOA="},"next_validators_hash":"E30CE736441FB9101FADDAF7E578ABBE6DFDB67207112350A9A904D554E1F5BE"},"unbonding_sequences":null,"initial_val_set":[{"pub_key":{"type":"tendermint/PubKeyEd25519","value":"dcASx5/LIKZqagJWN0frOlFtcvz91frYmj/zmoZRWro="},"power":1}]}` + + var expectedGenesis consumertypes.GenesisState + err = json.Unmarshal([]byte(jsonString), &expectedGenesis) + require.NoError(t, err) + + // Zeroing out different fields that are challenging to mock + actualGenesis.InitialValSet = []abci.ValidatorUpdate{} + expectedGenesis.InitialValSet = []abci.ValidatorUpdate{} + actualGenesis.ProviderConsensusState = &ibctmtypes.ConsensusState{} + expectedGenesis.ProviderConsensusState = &ibctmtypes.ConsensusState{} + + require.Equal(t, actualGenesis, expectedGenesis, "consumer chain genesis created incorrectly") +} + +// TestBeginBlockInit directly tests BeginBlockInit against the spec using helpers defined above. 
+// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-bblock-init1 +// Spec tag:[CCV-PCF-BBLOCK-INIT.1] +func TestBeginBlockInit(t *testing.T) { + + now := time.Now().UTC() + + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() + ctx = ctx.WithBlockTime(now) + + pendingProps := []*providertypes.ConsumerAdditionProposal{ + providertypes.NewConsumerAdditionProposal( + "title", "description", "chain1", clienttypes.NewHeight(3, 4), []byte{}, []byte{}, + now.Add(-time.Hour).UTC()).(*providertypes.ConsumerAdditionProposal), + providertypes.NewConsumerAdditionProposal( + "title", "description", "chain2", clienttypes.NewHeight(3, 4), []byte{}, []byte{}, + now.UTC()).(*providertypes.ConsumerAdditionProposal), + providertypes.NewConsumerAdditionProposal( + "title", "description", "chain3", clienttypes.NewHeight(3, 4), []byte{}, []byte{}, + now.Add(time.Hour).UTC()).(*providertypes.ConsumerAdditionProposal), + } + + gomock.InOrder( + // Expect client creation for the 1st and second proposals (spawn time already passed) + append(testkeeper.GetMocksForCreateConsumerClient(ctx, &mocks, "chain1", clienttypes.NewHeight(3, 4)), + testkeeper.GetMocksForCreateConsumerClient(ctx, &mocks, "chain2", clienttypes.NewHeight(3, 4))...)..., + ) + + for _, prop := range pendingProps { + err := providerKeeper.SetPendingConsumerAdditionProp(ctx, prop) + require.NoError(t, err) + } + + providerKeeper.BeginBlockInit(ctx) + + // Only the 3rd (final) proposal is still stored as pending + _, found := providerKeeper.GetPendingConsumerAdditionProp( + ctx, pendingProps[0].SpawnTime, pendingProps[0].ChainId) + require.False(t, found) + _, found = providerKeeper.GetPendingConsumerAdditionProp( + ctx, pendingProps[1].SpawnTime, pendingProps[1].ChainId) + require.False(t, found) + _, found = providerKeeper.GetPendingConsumerAdditionProp( + ctx, pendingProps[2].SpawnTime, pendingProps[2].ChainId) + require.True(t, found) +} + +// TestBeginBlockCCR tests BeginBlockCCR against the spec. +// +// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-bblock-ccr1 +// Spec tag: [CCV-PCF-BBLOCK-CCR.1] +func TestBeginBlockCCR(t *testing.T) { + now := time.Now().UTC() + + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + defer ctrl.Finish() + ctx = ctx.WithBlockTime(now) + + pendingProps := []*providertypes.ConsumerRemovalProposal{ + providertypes.NewConsumerRemovalProposal( + "title", "description", "chain1", now.Add(-time.Hour).UTC(), + ).(*providertypes.ConsumerRemovalProposal), + providertypes.NewConsumerRemovalProposal( + "title", "description", "chain2", now, + ).(*providertypes.ConsumerRemovalProposal), + providertypes.NewConsumerRemovalProposal( + "title", "description", "chain3", now.Add(time.Hour).UTC(), + ).(*providertypes.ConsumerRemovalProposal), + } + + // + // Mock expectations + // + expectations := []*gomock.Call{} + for _, prop := range pendingProps { + // A consumer chain is setup corresponding to each prop, making these mocks necessary + expectations = append(expectations, testkeeper.GetMocksForCreateConsumerClient(ctx, &mocks, + prop.ChainId, clienttypes.NewHeight(2, 3))...) 
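TestBeginBlockInit above and TestBeginBlockCCR here assert the same begin-block shape: execute every pending proposal whose time is at or before the block time, stop at the first future-dated one, and delete only what was executed. A minimal, self-contained model of that loop (the types are hypothetical; the real code iterates the store via the callbacks added in proposal.go):

package main

import (
	"fmt"
	"time"
)

type pendingProp struct {
	ChainID string
	Time    time.Time // spawn time or stop time
}

// propsToExecute assumes pending is already sorted by Time, as iteration over
// timestamp-prefixed keys would guarantee.
func propsToExecute(pending []pendingProp, blockTime time.Time) []pendingProp {
	due := []pendingProp{}
	for _, p := range pending {
		if blockTime.Before(p.Time) {
			break // everything past this point is still in the future
		}
		due = append(due, p)
	}
	return due
}

func main() {
	now := time.Now().UTC()
	pending := []pendingProp{
		{ChainID: "chain1", Time: now.Add(-time.Hour)},
		{ChainID: "chain2", Time: now},
		{ChainID: "chain3", Time: now.Add(time.Hour)},
	}
	for _, p := range propsToExecute(pending, now) {
		fmt.Println("execute and delete:", p.ChainID) // chain1, chain2; chain3 stays pending
	}
}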
+ expectations = append(expectations, testkeeper.GetMocksForSetConsumerChain(ctx, &mocks, prop.ChainId)...) + } + // Only first two consumer chains should be stopped + expectations = append(expectations, testkeeper.GetMocksForStopConsumerChain(ctx, &mocks)...) + expectations = append(expectations, testkeeper.GetMocksForStopConsumerChain(ctx, &mocks)...) + + gomock.InOrder(expectations...) + + // + // Remaining setup + // + for _, prop := range pendingProps { + // Setup a valid consumer chain for each prop + err := providerKeeper.CreateConsumerClient(ctx, prop.ChainId, clienttypes.NewHeight(2, 3), false) + require.NoError(t, err) + err = providerKeeper.SetConsumerChain(ctx, "channelID") + require.NoError(t, err) + + // Set removal props for all consumer chains + providerKeeper.SetPendingConsumerRemovalProp(ctx, prop.ChainId, prop.StopTime) + } + + // + // Test execution + // + providerKeeper.BeginBlockCCR(ctx) + + // Only the 3rd (final) proposal is still stored as pending + found := providerKeeper.GetPendingConsumerRemovalProp( + ctx, pendingProps[0].ChainId, pendingProps[0].StopTime) + require.False(t, found) + found = providerKeeper.GetPendingConsumerRemovalProp( + ctx, pendingProps[1].ChainId, pendingProps[1].StopTime) + require.False(t, found) + found = providerKeeper.GetPendingConsumerRemovalProp( + ctx, pendingProps[2].ChainId, pendingProps[2].StopTime) + require.True(t, found) +} diff --git a/x/ccv/provider/keeper/relay.go b/x/ccv/provider/keeper/relay.go index 283c3ad0c1..19938e0284 100644 --- a/x/ccv/provider/keeper/relay.go +++ b/x/ccv/provider/keeper/relay.go @@ -122,7 +122,7 @@ func (k Keeper) SendValidatorUpdates(ctx sdk.Context) { valUpdateID := k.GetValidatorSetUpdateId(ctx) // get the validator updates from the staking module valUpdates := k.stakingKeeper.GetValidatorUpdates(ctx) - k.IterateConsumerChains(ctx, func(ctx sdk.Context, chainID string) (stop bool) { + k.IterateConsumerChains(ctx, func(ctx sdk.Context, chainID, clientID string) (stop bool) { // check whether there is an established CCV channel to this consumer chain if channelID, found := k.GetChainToChannel(ctx, chainID); found { // Send pending VSC packets to consumer chain diff --git a/x/ccv/provider/module.go b/x/ccv/provider/module.go index 39f11b850f..ac2aeac9f6 100644 --- a/x/ccv/provider/module.go +++ b/x/ccv/provider/module.go @@ -128,12 +128,15 @@ func (am AppModule) RegisterServices(cfg module.Configurator) { providertypes.RegisterQueryServer(cfg.QueryServer(), am.keeper) } -// InitGenesis performs genesis initialization for the provider module. It returns -// no validator updates. +// InitGenesis performs genesis initialization for the provider module. It returns no validator updates. 
+// Note: This method along with ValidateGenesis satisfies the CCV spec: +// https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-initg1 func (am AppModule) InitGenesis(ctx sdk.Context, cdc codec.JSONCodec, data json.RawMessage) []abci.ValidatorUpdate { var genesisState providertypes.GenesisState cdc.MustUnmarshalJSON(data, &genesisState) + am.keeper.InitGenesis(ctx, &genesisState) + // initialize validator update id // TODO: Include in genesis and initialize from genesis value am.keeper.SetValidatorSetUpdateId(ctx, 1) @@ -152,10 +155,10 @@ func (AppModule) ConsensusVersion() uint64 { return 1 } // BeginBlock implements the AppModule interface func (am AppModule) BeginBlock(ctx sdk.Context, req abci.RequestBeginBlock) { - // Check if there are any consumer chains that are due to be started - am.keeper.IteratePendingCreateProposal(ctx) - // Check if there are any consumer chains that are due to be stopped - am.keeper.IteratePendingStopProposal(ctx) + // Create clients to consumer chains that are due to be spawned via pending consumer addition proposals + am.keeper.BeginBlockInit(ctx) + // Stop and remove state for any consumer chains that are due to be stopped via pending consumer removal proposals + am.keeper.BeginBlockCCR(ctx) } // EndBlock implements the AppModule interface @@ -164,7 +167,7 @@ func (am AppModule) EndBlock(ctx sdk.Context, req abci.RequestEndBlock) []abci.V am.keeper.CompleteMaturedUnbondingOps(ctx) // send validator updates to consumer chains - am.keeper.SendValidatorUpdates(ctx) + am.keeper.TrySendValidatorUpdates(ctx) return []abci.ValidatorUpdate{} } diff --git a/x/ccv/provider/module_test.go b/x/ccv/provider/module_test.go new file mode 100644 index 0000000000..c8ab78de94 --- /dev/null +++ b/x/ccv/provider/module_test.go @@ -0,0 +1,165 @@ +package provider_test + +import ( + "testing" + + capabilitytypes "github.com/cosmos/cosmos-sdk/x/capability/types" + host "github.com/cosmos/ibc-go/v3/modules/core/24-host" + testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + "github.com/cosmos/interchain-security/x/ccv/provider" + "github.com/cosmos/interchain-security/x/ccv/provider/types" + ccv "github.com/cosmos/interchain-security/x/ccv/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/golang/mock/gomock" +) + +// Tests the provider's InitGenesis implementation against the spec. 
+// See: https://github.com/cosmos/ibc/blob/main/spec/app/ics-028-cross-chain-validation/methods.md#ccv-pcf-initg1 +// Spec tag: [CCV-PCF-INITG.1] +// +// Note: Genesis validation for the provider is tested in TestValidateGenesisState +func TestInitGenesis(t *testing.T) { + + type testCase struct { + name string + // Whether port capability is already bound to the CCV provider module + isBound bool + // Provider's storage of consumer state to test against + consumerStates []types.ConsumerState + // Error returned from ClaimCapability during port binding, default: nil + errFromClaimCap error + // Whether method call should panic, default: false + expPanic bool + } + + tests := []testCase{ + { + name: "already bound port, no consumer states", + isBound: true, + consumerStates: []types.ConsumerState{}, + }, + { + name: "no bound port, no consumer states", + isBound: false, + consumerStates: []types.ConsumerState{}, + }, + { + name: "no bound port, multiple consumer states", + isBound: false, + consumerStates: []types.ConsumerState{ + { + ChainId: "chainId1", + ChannelId: "channelIdToChain1", + }, + { + ChainId: "chainId2", + ChannelId: "channelIdToChain2", + }, + { + ChainId: "chainId3", + ChannelId: "channelIdToChain3", + }, + }, + }, + { + name: "already bound port, one consumer state", + isBound: true, + consumerStates: []types.ConsumerState{ + { + ChainId: "chainId77", + ChannelId: "channelIdToChain77", + }, + }, + }, + { + name: "capability not owned, method should panic", + isBound: false, + consumerStates: []types.ConsumerState{ + { + ChainId: "chainId77", + ChannelId: "channelIdToChain77", + }, + }, + errFromClaimCap: capabilitytypes.ErrCapabilityNotOwned, + expPanic: true, + }, + } + + for _, tc := range tests { + // + // Setup + // + keeperParams := testkeeper.NewInMemKeeperParams(t) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + + appModule := provider.NewAppModule(&providerKeeper) + genState := types.NewGenesisState( + providerKeeper.GetValidatorSetUpdateId(ctx), + nil, + tc.consumerStates, + nil, + nil, + nil, + nil, types.DefaultParams(), + ) + + cdc := keeperParams.Cdc + jsonBytes := cdc.MustMarshalJSON(genState) + + // + // Assert mocked logic before method executes + // + orderedCalls := []*gomock.Call{ + mocks.MockScopedKeeper.EXPECT().GetCapability( + ctx, host.PortPath(ccv.ProviderPortID), + ).Return( + &capabilitytypes.Capability{}, + tc.isBound, // Capability is returned successfully if port capability is already bound to this module. + ), + } + + // If port capability is not already bound, port will be bound and capability claimed. + if !tc.isBound { + dummyCap := &capabilitytypes.Capability{} + + orderedCalls = append(orderedCalls, + mocks.MockPortKeeper.EXPECT().BindPort(ctx, ccv.ProviderPortID).Return(dummyCap), + mocks.MockScopedKeeper.EXPECT().ClaimCapability( + ctx, dummyCap, host.PortPath(ccv.ProviderPortID)).Return(tc.errFromClaimCap), + ) + } + + gomock.InOrder(orderedCalls...) 
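The mock expectations above encode a common IBC port setup during InitGenesis: look up the port capability, and only when it is not already bound, bind the port and claim the returned capability, panicking if the claim fails. The sketch below reproduces that control flow with in-memory stand-ins; the interfaces and names are assumptions, not the module's actual keepers.

package main

import "fmt"

type capability struct{ index uint64 }

// memScopedKeeper and memPortKeeper are toy stand-ins for the scoped and port keepers.
type memScopedKeeper struct{ caps map[string]*capability }

func (sk *memScopedKeeper) GetCapability(path string) (*capability, bool) {
	c, ok := sk.caps[path]
	return c, ok
}

func (sk *memScopedKeeper) ClaimCapability(c *capability, path string) error {
	sk.caps[path] = c
	return nil
}

type memPortKeeper struct{ next uint64 }

func (pk *memPortKeeper) BindPort(portID string) *capability {
	pk.next++
	return &capability{index: pk.next}
}

// ensurePortBound mirrors the flow asserted by the mocks: skip everything if
// the capability already exists, otherwise bind and claim, and panic on a
// failed claim (the expPanic test case above).
func ensurePortBound(sk *memScopedKeeper, pk *memPortKeeper, portID, portPath string) {
	if _, ok := sk.GetCapability(portPath); ok {
		return
	}
	portCap := pk.BindPort(portID)
	if err := sk.ClaimCapability(portCap, portPath); err != nil {
		panic(fmt.Errorf("could not claim port capability: %w", err))
	}
}

func main() {
	sk := &memScopedKeeper{caps: map[string]*capability{}}
	pk := &memPortKeeper{}
	ensurePortBound(sk, pk, "provider", "ports/provider")
	ensurePortBound(sk, pk, "provider", "ports/provider") // second call is a no-op
	fmt.Println(len(sk.caps)) // 1
}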
+ + // + // Execute method, then assert expected results + // + if tc.expPanic { + require.Panics(t, assert.PanicTestFunc(func() { + appModule.InitGenesis(ctx, cdc, jsonBytes) + }), tc.name) + continue // Nothing else to verify + } + + valUpdates := appModule.InitGenesis(ctx, cdc, jsonBytes) + + numStatesCounted := 0 + for _, state := range tc.consumerStates { + numStatesCounted += 1 + channelID, found := providerKeeper.GetChainToChannel(ctx, state.ChainId) + require.True(t, found) + require.Equal(t, state.ChannelId, channelID) + + chainID, found := providerKeeper.GetChannelToChain(ctx, state.ChannelId) + require.True(t, found) + require.Equal(t, state.ChainId, chainID) + } + require.Equal(t, len(tc.consumerStates), numStatesCounted) + + require.Empty(t, valUpdates, "InitGenesis should return no validator updates") + + ctrl.Finish() + } +} diff --git a/x/ccv/provider/proposal_handler.go b/x/ccv/provider/proposal_handler.go index fb8491ce3d..55c97a26c1 100644 --- a/x/ccv/provider/proposal_handler.go +++ b/x/ccv/provider/proposal_handler.go @@ -8,14 +8,14 @@ import ( "github.com/cosmos/interchain-security/x/ccv/provider/types" ) -// NewConsumerChainProposalHandler defines the CCV provider proposal handler +// NewConsumerChainProposalHandler defines the handler for consumer addition and consumer removal proposals. func NewConsumerChainProposalHandler(k keeper.Keeper) govtypes.Handler { return func(ctx sdk.Context, content govtypes.Content) error { switch c := content.(type) { - case *types.CreateConsumerChainProposal: - return k.CreateConsumerChainProposal(ctx, c) - case *types.StopConsumerChainProposal: - return k.StopConsumerChainProposal(ctx, c) + case *types.ConsumerAdditionProposal: + return k.HandleConsumerAdditionProposal(ctx, c) + case *types.ConsumerRemovalProposal: + return k.HandleConsumerRemovalProposal(ctx, c) default: return sdkerrors.Wrapf(sdkerrors.ErrUnknownRequest, "unrecognized ccv proposal content type: %T", c) } diff --git a/x/ccv/provider/proposal_handler_test.go b/x/ccv/provider/proposal_handler_test.go new file mode 100644 index 0000000000..92c806a1ed --- /dev/null +++ b/x/ccv/provider/proposal_handler_test.go @@ -0,0 +1,90 @@ +package provider_test + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + "github.com/golang/mock/gomock" + "github.com/stretchr/testify/require" + + clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types" + + "testing" + "time" + + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + testkeeper "github.com/cosmos/interchain-security/testutil/keeper" + "github.com/cosmos/interchain-security/x/ccv/provider" + "github.com/cosmos/interchain-security/x/ccv/provider/types" +) + +// TestConsumerChainProposalHandler tests the highest level handler for proposals concerning both +// creating and stopping consumer chains. 
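NewConsumerChainProposalHandler above is a plain type switch over governance content, which is why the test below expects an error rather than a panic both for nil content and for unsupported proposal types. A self-contained model of that dispatch, with hypothetical proposal types:

package main

import (
	"errors"
	"fmt"
)

type content interface{ ProposalType() string }

// additionProposal and removalProposal are hypothetical stand-ins for the
// provider's ConsumerAdditionProposal and ConsumerRemovalProposal types.
type additionProposal struct{ ChainID string }
type removalProposal struct{ ChainID string }

func (additionProposal) ProposalType() string { return "ConsumerAddition" }
func (removalProposal) ProposalType() string  { return "ConsumerRemoval" }

func handleProposal(c content) error {
	switch p := c.(type) {
	case *additionProposal:
		fmt.Println("create consumer client for", p.ChainID)
		return nil
	case *removalProposal:
		fmt.Println("stop consumer chain", p.ChainID)
		return nil
	default:
		// A nil content interface also lands here.
		return errors.New("unrecognized ccv proposal content type")
	}
}

func main() {
	fmt.Println(handleProposal(&additionProposal{ChainID: "chainID"})) // <nil>
	fmt.Println(handleProposal(&removalProposal{ChainID: "chainID"}))  // <nil>
	fmt.Println(handleProposal(nil))                                   // unrecognized ccv proposal content type
}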
+func TestConsumerChainProposalHandler(t *testing.T) { + + // Snapshot times asserted in tests + now := time.Now().UTC() + hourFromNow := now.Add(time.Hour).UTC() + + testCases := []struct { + name string + content govtypes.Content + blockTime time.Time + expValidConsumerAddition bool + expValidConsumerRemoval bool + }{ + { + name: "valid consumer addition proposal", + content: types.NewConsumerAdditionProposal( + "title", "description", "chainID", + clienttypes.NewHeight(2, 3), []byte("gen_hash"), []byte("bin_hash"), now), + blockTime: hourFromNow, // ctx blocktime is after proposal's spawn time + expValidConsumerAddition: true, + }, + { + name: "valid consumer removal proposal", + content: types.NewConsumerRemovalProposal( + "title", "description", "chainID", now), + blockTime: hourFromNow, + expValidConsumerRemoval: true, + }, + { + name: "nil proposal", + content: nil, + blockTime: hourFromNow, + }, + { + name: "unsupported proposal type", + content: distributiontypes.NewCommunityPoolSpendProposal( + "title", "desc", []byte{}, + sdk.NewCoins(sdk.NewCoin("communityfunds", sdk.NewInt(10)))), + }, + } + + for _, tc := range testCases { + + // Setup + keeperParams := testkeeper.NewInMemKeeperParams(t) + keeperParams.SetTemplateClientState(nil) + providerKeeper, ctx, ctrl, mocks := testkeeper.GetProviderKeeperAndCtx(t, keeperParams) + ctx = ctx.WithBlockTime(tc.blockTime) + + // Mock expectations depending on expected outcome + if tc.expValidConsumerAddition { + gomock.InOrder(testkeeper.GetMocksForCreateConsumerClient(ctx, &mocks, "chainID", clienttypes.NewHeight(2, 3))...) + } + if tc.expValidConsumerRemoval { + testkeeper.SetupForStoppingConsumerChain(t, ctx, &providerKeeper, mocks) + } + + // Execution + proposalHandler := provider.NewConsumerChainProposalHandler(providerKeeper) + err := proposalHandler(ctx, tc.content) + + if tc.expValidConsumerAddition || tc.expValidConsumerRemoval { + require.NoError(t, err) + } else { + require.Error(t, err) + } + ctrl.Finish() + } +} diff --git a/x/ccv/provider/types/codec.go b/x/ccv/provider/types/codec.go index 143cd391a0..25250f68c1 100644 --- a/x/ccv/provider/types/codec.go +++ b/x/ccv/provider/types/codec.go @@ -16,7 +16,7 @@ func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { func RegisterInterfaces(registry codectypes.InterfaceRegistry) { registry.RegisterImplementations( (*govtypes.Content)(nil), - &CreateConsumerChainProposal{}, + &ConsumerAdditionProposal{}, ) } diff --git a/x/ccv/provider/types/consumer.go b/x/ccv/provider/types/consumer.go new file mode 100644 index 0000000000..4376678cc2 --- /dev/null +++ b/x/ccv/provider/types/consumer.go @@ -0,0 +1,30 @@ +package types + +import ( + consumertypes "github.com/cosmos/interchain-security/x/ccv/consumer/types" + ccv "github.com/cosmos/interchain-security/x/ccv/types" +) + +func NewConsumerStates( + chainID, + clientID, + channelID string, + initialHeight uint64, + lockUbdTimeout bool, + genesis consumertypes.GenesisState, + unbondingOpsIndexes []UnbondingOpIndex, + pendingValsetChanges []ccv.ValidatorSetChangePacketData, + slashDowntimeAck []string, +) ConsumerState { + return ConsumerState{ + ChainId: chainID, + ClientId: clientID, + ChannelId: channelID, + InitialHeight: initialHeight, + LockUnbondingOnTimeout: true, + UnbondingOpsIndex: unbondingOpsIndexes, + PendingValsetChanges: pendingValsetChanges, + ConsumerGenesis: genesis, + SlashDowntimeAck: slashDowntimeAck, + } +} diff --git a/x/ccv/provider/types/errors.go b/x/ccv/provider/types/errors.go index d438f0262c..decd79474b 
100644 --- a/x/ccv/provider/types/errors.go +++ b/x/ccv/provider/types/errors.go @@ -6,8 +6,8 @@ import ( // Provider sentinel errors var ( - ErrInvalidCreateProposal = sdkerrors.Register(ModuleName, 1, "invalid create consumer chain proposal") - ErrInvalidStopProposal = sdkerrors.Register(ModuleName, 2, "invalid stop consumer chain proposal") - ErrUnknownConsumerChainId = sdkerrors.Register(ModuleName, 3, "no consumer chain with this chain id") - ErrUnknownConsumerChannelId = sdkerrors.Register(ModuleName, 4, "no consumer chain with this channel id") + ErrInvalidConsumerAdditionProposal = sdkerrors.Register(ModuleName, 1, "invalid consumer addition proposal") + ErrInvalidConsumerRemovalProp = sdkerrors.Register(ModuleName, 2, "invalid consumer removal proposal") + ErrUnknownConsumerChainId = sdkerrors.Register(ModuleName, 3, "no consumer chain with this chain id") + ErrUnknownConsumerChannelId = sdkerrors.Register(ModuleName, 4, "no consumer chain with this channel id") ) diff --git a/x/ccv/provider/types/genesis.go b/x/ccv/provider/types/genesis.go index 1cedfe0e0c..a3eab5c29a 100644 --- a/x/ccv/provider/types/genesis.go +++ b/x/ccv/provider/types/genesis.go @@ -5,12 +5,29 @@ import ( sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" host "github.com/cosmos/ibc-go/v3/modules/core/24-host" + ccv "github.com/cosmos/interchain-security/x/ccv/types" + types "github.com/cosmos/interchain-security/x/ccv/types" ) -func NewGenesisState(consumerStates []ConsumerState, params Params) *GenesisState { +func NewGenesisState( + vscID uint64, + vscIdToHeights []ValsetUpdateIdToHeight, + consumerStates []ConsumerState, + unbondingOps []types.UnbondingOp, + matureUbdOps *ccv.MaturedUnbondingOps, + additionProposals []ConsumerAdditionProposal, + removalProposals []ConsumerRemovalProposal, + params Params, +) *GenesisState { return &GenesisState{ - ConsumerStates: consumerStates, - Params: params, + ValsetUpdateId: vscID, + ValsetUpdateIdToHeight: vscIdToHeights, + ConsumerStates: consumerStates, + UnbondingOps: unbondingOps, + MatureUnbondingOps: matureUbdOps, + ConsumerAdditionProposals: additionProposals, + ConsumerRemovalProposals: removalProposals, + Params: params, } } diff --git a/x/ccv/provider/types/genesis.pb.go b/x/ccv/provider/types/genesis.pb.go index 180e5788d9..9283d01754 100644 --- a/x/ccv/provider/types/genesis.pb.go +++ b/x/ccv/provider/types/genesis.pb.go @@ -5,7 +5,8 @@ package types import ( fmt "fmt" - _ "github.com/cosmos/interchain-security/x/ccv/types" + types1 "github.com/cosmos/interchain-security/x/ccv/consumer/types" + types "github.com/cosmos/interchain-security/x/ccv/types" _ "github.com/gogo/protobuf/gogoproto" proto "github.com/gogo/protobuf/proto" io "io" @@ -26,8 +27,22 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // GenesisState defines the CCV provider chain genesis state type GenesisState struct { - ConsumerStates []ConsumerState `protobuf:"bytes,1,rep,name=consumer_states,json=consumerStates,proto3" json:"consumer_states" yaml:"consumer_states"` - Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params"` + // empty for a completely new chain + ValsetUpdateId uint64 `protobuf:"varint,1,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` + // empty for a completely new chain + ConsumerStates []ConsumerState `protobuf:"bytes,2,rep,name=consumer_states,json=consumerStates,proto3" json:"consumer_states" yaml:"consumer_states"` + // UnbondingOps defines the consumer chains that are 
still unbonding + // empty for a completely new chain + UnbondingOps []types.UnbondingOp `protobuf:"bytes,3,rep,name=unbonding_ops,json=unbondingOps,proto3" json:"unbonding_ops"` + // empty for a completely new chain + MatureUnbondingOps *types.MaturedUnbondingOps `protobuf:"bytes,4,opt,name=mature_unbonding_ops,json=matureUnbondingOps,proto3" json:"mature_unbonding_ops,omitempty"` + // empty for a completely new chain + ValsetUpdateIdToHeight []ValsetUpdateIdToHeight `protobuf:"bytes,5,rep,name=valset_update_id_to_height,json=valsetUpdateIdToHeight,proto3" json:"valset_update_id_to_height"` + // empty for a completely new chain + ConsumerAdditionProposals []ConsumerAdditionProposal `protobuf:"bytes,6,rep,name=consumer_addition_proposals,json=consumerAdditionProposals,proto3" json:"consumer_addition_proposals"` + // empty for a completely new chain + ConsumerRemovalProposals []ConsumerRemovalProposal `protobuf:"bytes,7,rep,name=consumer_removal_proposals,json=consumerRemovalProposals,proto3" json:"consumer_removal_proposals"` + Params Params `protobuf:"bytes,8,opt,name=params,proto3" json:"params"` } func (m *GenesisState) Reset() { *m = GenesisState{} } @@ -63,6 +78,13 @@ func (m *GenesisState) XXX_DiscardUnknown() { var xxx_messageInfo_GenesisState proto.InternalMessageInfo +func (m *GenesisState) GetValsetUpdateId() uint64 { + if m != nil { + return m.ValsetUpdateId + } + return 0 +} + func (m *GenesisState) GetConsumerStates() []ConsumerState { if m != nil { return m.ConsumerStates @@ -70,6 +92,41 @@ func (m *GenesisState) GetConsumerStates() []ConsumerState { return nil } +func (m *GenesisState) GetUnbondingOps() []types.UnbondingOp { + if m != nil { + return m.UnbondingOps + } + return nil +} + +func (m *GenesisState) GetMatureUnbondingOps() *types.MaturedUnbondingOps { + if m != nil { + return m.MatureUnbondingOps + } + return nil +} + +func (m *GenesisState) GetValsetUpdateIdToHeight() []ValsetUpdateIdToHeight { + if m != nil { + return m.ValsetUpdateIdToHeight + } + return nil +} + +func (m *GenesisState) GetConsumerAdditionProposals() []ConsumerAdditionProposal { + if m != nil { + return m.ConsumerAdditionProposals + } + return nil +} + +func (m *GenesisState) GetConsumerRemovalProposals() []ConsumerRemovalProposal { + if m != nil { + return m.ConsumerRemovalProposals + } + return nil +} + func (m *GenesisState) GetParams() Params { if m != nil { return m.Params @@ -77,10 +134,25 @@ func (m *GenesisState) GetParams() Params { return Params{} } -// ConsumerState defines the state that the provider chain stores for each consumer chain +// consumer chain type ConsumerState struct { + // The provider's identifier for this consumer chain. ChainId string `protobuf:"bytes,1,opt,name=chain_id,json=chainId,proto3" json:"chain_id,omitempty"` + // The provider's channel identifier to this consumer chain. 
ChannelId string `protobuf:"bytes,2,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"` + // ClientID defines the IBC client ID for the consumer chain + ClientId string `protobuf:"bytes,3,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + InitialHeight uint64 `protobuf:"varint,4,opt,name=initial_height,json=initialHeight,proto3" json:"initial_height,omitempty"` + // LockUnbondingOnTimeout defines whether the unbonding funds should be released for this + // chain in case of a IBC channel timeout + LockUnbondingOnTimeout bool `protobuf:"varint,5,opt,name=lock_unbonding_on_timeout,json=lockUnbondingOnTimeout,proto3" json:"lock_unbonding_on_timeout,omitempty"` + // ConsumerGenesis defines the initial consumer chain genesis states + ConsumerGenesis types1.GenesisState `protobuf:"bytes,6,opt,name=consumer_genesis,json=consumerGenesis,proto3" json:"consumer_genesis"` + // PendingValsetChanges defines the pending validator set changes for the consumer chain + PendingValsetChanges []types.ValidatorSetChangePacketData `protobuf:"bytes,7,rep,name=pending_valset_changes,json=pendingValsetChanges,proto3" json:"pending_valset_changes"` + SlashDowntimeAck []string `protobuf:"bytes,8,rep,name=slash_downtime_ack,json=slashDowntimeAck,proto3" json:"slash_downtime_ack,omitempty"` + // UnbondingOpsIndex defines the unbonding operations on the consumer chain + UnbondingOpsIndex []UnbondingOpIndex `protobuf:"bytes,9,rep,name=unbonding_ops_index,json=unbondingOpsIndex,proto3" json:"unbonding_ops_index"` } func (m *ConsumerState) Reset() { *m = ConsumerState{} } @@ -130,9 +202,168 @@ func (m *ConsumerState) GetChannelId() string { return "" } +func (m *ConsumerState) GetClientId() string { + if m != nil { + return m.ClientId + } + return "" +} + +func (m *ConsumerState) GetInitialHeight() uint64 { + if m != nil { + return m.InitialHeight + } + return 0 +} + +func (m *ConsumerState) GetLockUnbondingOnTimeout() bool { + if m != nil { + return m.LockUnbondingOnTimeout + } + return false +} + +func (m *ConsumerState) GetConsumerGenesis() types1.GenesisState { + if m != nil { + return m.ConsumerGenesis + } + return types1.GenesisState{} +} + +func (m *ConsumerState) GetPendingValsetChanges() []types.ValidatorSetChangePacketData { + if m != nil { + return m.PendingValsetChanges + } + return nil +} + +func (m *ConsumerState) GetSlashDowntimeAck() []string { + if m != nil { + return m.SlashDowntimeAck + } + return nil +} + +func (m *ConsumerState) GetUnbondingOpsIndex() []UnbondingOpIndex { + if m != nil { + return m.UnbondingOpsIndex + } + return nil +} + +// UnbondingOpIndex defines the genesis information for each unbonding operations index +// referenced by chain id and valset udpate id +type UnbondingOpIndex struct { + ValsetUpdateId uint64 `protobuf:"varint,1,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` + UnbondingOpIndex []uint64 `protobuf:"varint,2,rep,packed,name=unbonding_op_index,json=unbondingOpIndex,proto3" json:"unbonding_op_index,omitempty"` +} + +func (m *UnbondingOpIndex) Reset() { *m = UnbondingOpIndex{} } +func (m *UnbondingOpIndex) String() string { return proto.CompactTextString(m) } +func (*UnbondingOpIndex) ProtoMessage() {} +func (*UnbondingOpIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_48411d9c7900d48e, []int{2} +} +func (m *UnbondingOpIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UnbondingOpIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_UnbondingOpIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UnbondingOpIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_UnbondingOpIndex.Merge(m, src) +} +func (m *UnbondingOpIndex) XXX_Size() int { + return m.Size() +} +func (m *UnbondingOpIndex) XXX_DiscardUnknown() { + xxx_messageInfo_UnbondingOpIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_UnbondingOpIndex proto.InternalMessageInfo + +func (m *UnbondingOpIndex) GetValsetUpdateId() uint64 { + if m != nil { + return m.ValsetUpdateId + } + return 0 +} + +func (m *UnbondingOpIndex) GetUnbondingOpIndex() []uint64 { + if m != nil { + return m.UnbondingOpIndex + } + return nil +} + +// ValsetUpdateIdToHeight defines the genesis information for the mapping +// of each valset udpate id to a block height +type ValsetUpdateIdToHeight struct { + ValsetUpdateId uint64 `protobuf:"varint,1,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` + Height uint64 `protobuf:"varint,2,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ValsetUpdateIdToHeight) Reset() { *m = ValsetUpdateIdToHeight{} } +func (m *ValsetUpdateIdToHeight) String() string { return proto.CompactTextString(m) } +func (*ValsetUpdateIdToHeight) ProtoMessage() {} +func (*ValsetUpdateIdToHeight) Descriptor() ([]byte, []int) { + return fileDescriptor_48411d9c7900d48e, []int{3} +} +func (m *ValsetUpdateIdToHeight) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValsetUpdateIdToHeight) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ValsetUpdateIdToHeight.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ValsetUpdateIdToHeight) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValsetUpdateIdToHeight.Merge(m, src) +} +func (m *ValsetUpdateIdToHeight) XXX_Size() int { + return m.Size() +} +func (m *ValsetUpdateIdToHeight) XXX_DiscardUnknown() { + xxx_messageInfo_ValsetUpdateIdToHeight.DiscardUnknown(m) +} + +var xxx_messageInfo_ValsetUpdateIdToHeight proto.InternalMessageInfo + +func (m *ValsetUpdateIdToHeight) GetValsetUpdateId() uint64 { + if m != nil { + return m.ValsetUpdateId + } + return 0 +} + +func (m *ValsetUpdateIdToHeight) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + func init() { proto.RegisterType((*GenesisState)(nil), "interchain_security.ccv.provider.v1.GenesisState") proto.RegisterType((*ConsumerState)(nil), "interchain_security.ccv.provider.v1.ConsumerState") + proto.RegisterType((*UnbondingOpIndex)(nil), "interchain_security.ccv.provider.v1.UnbondingOpIndex") + proto.RegisterType((*ValsetUpdateIdToHeight)(nil), "interchain_security.ccv.provider.v1.ValsetUpdateIdToHeight") } func init() { @@ -140,29 +371,57 @@ func init() { } var fileDescriptor_48411d9c7900d48e = []byte{ - // 340 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcf, 0x4a, 0xf3, 0x40, - 0x14, 0xc5, 0x33, 0xfd, 0x3e, 0xaa, 0x9d, 0xfa, 0x07, 0x82, 0x48, 0x2d, 0x38, 0x2d, 0xd1, 0x45, - 0x41, 0x9c, 0x21, 0x71, 0xd7, 0x65, 0x5d, 0x48, 0x76, 0x52, 0x5d, 0xb9, 0x29, 0xe9, 0x64, 0x48, - 0x07, 0x9a, 0x4c, 0x98, 0x99, 0x06, 0x8b, 0x2f, 0xe1, 0x63, 0x75, 0xd9, 0x95, 0xb8, 0x2a, 0xd2, - 0xbe, 0x81, 0x4f, 
0x20, 0x99, 0xc6, 0x6a, 0x45, 0x21, 0xbb, 0x99, 0x7b, 0xef, 0xef, 0x9c, 0x03, - 0x07, 0xba, 0x3c, 0xd1, 0x4c, 0xd2, 0x51, 0xc0, 0x93, 0x81, 0x62, 0x74, 0x22, 0xb9, 0x9e, 0x12, - 0x4a, 0x33, 0x92, 0x4a, 0x91, 0xf1, 0x90, 0x49, 0x92, 0xb9, 0x24, 0x62, 0x09, 0x53, 0x5c, 0xe1, - 0x54, 0x0a, 0x2d, 0xec, 0xb3, 0x5f, 0x10, 0x4c, 0x69, 0x86, 0x3f, 0x11, 0x9c, 0xb9, 0xcd, 0xa3, - 0x48, 0x44, 0xc2, 0xdc, 0x93, 0xfc, 0xb5, 0x46, 0x9b, 0xe7, 0x7f, 0xb9, 0x65, 0x2e, 0x29, 0x14, - 0xb4, 0x68, 0x7a, 0x65, 0x32, 0x6d, 0xcc, 0x0c, 0xe3, 0xbc, 0x00, 0xb8, 0x77, 0xb3, 0x8e, 0x79, - 0xa7, 0x03, 0xcd, 0xec, 0x27, 0x78, 0x48, 0x45, 0xa2, 0x26, 0x31, 0x93, 0x03, 0x95, 0x4f, 0x54, - 0x03, 0xb4, 0xff, 0x75, 0xea, 0x9e, 0x87, 0x4b, 0xe4, 0xc7, 0xd7, 0x05, 0x6b, 0xc4, 0x7a, 0x68, - 0xb6, 0x68, 0x59, 0xef, 0x8b, 0xd6, 0xf1, 0x34, 0x88, 0xc7, 0x5d, 0xe7, 0x87, 0xb0, 0xd3, 0x3f, - 0xa0, 0xdf, 0xcf, 0x95, 0xed, 0xc3, 0x6a, 0x1a, 0xc8, 0x20, 0x56, 0x8d, 0x4a, 0x1b, 0x74, 0xea, - 0xde, 0x45, 0x29, 0xcf, 0x5b, 0x83, 0xf4, 0xfe, 0xe7, 0x66, 0xfd, 0x42, 0xc0, 0xf1, 0xe1, 0xfe, - 0x56, 0x16, 0xfb, 0x04, 0xee, 0xae, 0x75, 0x78, 0xd8, 0x00, 0x6d, 0xd0, 0xa9, 0xf5, 0x77, 0xcc, - 0xdf, 0x0f, 0xed, 0x53, 0x08, 0xe9, 0x28, 0x48, 0x12, 0x36, 0xce, 0x97, 0x15, 0xb3, 0xac, 0x15, - 0x13, 0x3f, 0xec, 0xdd, 0xcf, 0x96, 0x08, 0xcc, 0x97, 0x08, 0xbc, 0x2d, 0x11, 0x78, 0x5e, 0x21, - 0x6b, 0xbe, 0x42, 0xd6, 0xeb, 0x0a, 0x59, 0x0f, 0xdd, 0x88, 0xeb, 0xd1, 0x64, 0x88, 0xa9, 0x88, - 0x09, 0x15, 0x2a, 0x16, 0x8a, 0x7c, 0x05, 0xbe, 0xdc, 0x74, 0xf0, 0xb8, 0xdd, 0x82, 0x9e, 0xa6, - 0x4c, 0x0d, 0xab, 0xa6, 0x80, 0xab, 0x8f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0x40, 0x06, 0x36, - 0x4a, 0x02, 0x00, 0x00, + // 794 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0x5d, 0x6f, 0xf3, 0x34, + 0x14, 0x6e, 0xd6, 0xd2, 0xb7, 0xf5, 0xfb, 0x6e, 0x14, 0x33, 0x55, 0x59, 0x5f, 0xd1, 0x55, 0x05, + 0x44, 0x25, 0x46, 0xa2, 0x14, 0x21, 0xc1, 0x80, 0x8b, 0x7d, 0x48, 0xd0, 0x0b, 0xc4, 0x94, 0x7d, + 0x5c, 0xec, 0x26, 0x72, 0x1d, 0xab, 0x35, 0x4d, 0xec, 0x28, 0x76, 0xc2, 0x26, 0x84, 0x84, 0xc4, + 0x1f, 0xe0, 0x0f, 0x21, 0x6e, 0x77, 0xb9, 0x4b, 0xae, 0x26, 0xb4, 0xfd, 0x03, 0x7e, 0x01, 0x8a, + 0xe3, 0x76, 0x69, 0xd5, 0x8e, 0xf6, 0x2e, 0x39, 0x8f, 0x9f, 0xe7, 0x3c, 0x3e, 0x3e, 0x3e, 0x06, + 0x0e, 0x65, 0x92, 0xc4, 0x78, 0x8c, 0x28, 0xf3, 0x04, 0xc1, 0x49, 0x4c, 0xe5, 0xad, 0x8d, 0x71, + 0x6a, 0x47, 0x31, 0x4f, 0xa9, 0x4f, 0x62, 0x3b, 0x75, 0xec, 0x11, 0x61, 0x44, 0x50, 0x61, 0x45, + 0x31, 0x97, 0x1c, 0x7e, 0xb8, 0x84, 0x62, 0x61, 0x9c, 0x5a, 0x53, 0x8a, 0x95, 0x3a, 0xad, 0xdd, + 0x11, 0x1f, 0x71, 0xb5, 0xde, 0xce, 0xbe, 0x72, 0x6a, 0xeb, 0xa3, 0x55, 0xd9, 0x52, 0xc7, 0xd6, + 0x0a, 0x92, 0xb7, 0xfa, 0xeb, 0x78, 0x9a, 0x25, 0xfb, 0x1f, 0x0e, 0xe6, 0x4c, 0x24, 0x61, 0xce, + 0x99, 0x7e, 0x6b, 0x8e, 0xb3, 0x0e, 0x67, 0x6e, 0xef, 0xdd, 0xbf, 0xaa, 0xe0, 0xcd, 0x77, 0x79, + 0xe4, 0x5c, 0x22, 0x49, 0x60, 0x0f, 0x34, 0x52, 0x14, 0x08, 0x22, 0xbd, 0x24, 0xf2, 0x91, 0x24, + 0x1e, 0xf5, 0x4d, 0xa3, 0x63, 0xf4, 0x2a, 0xee, 0x4e, 0x1e, 0xbf, 0x54, 0xe1, 0x81, 0x0f, 0x7f, + 0x01, 0xef, 0x4e, 0x75, 0x3d, 0x91, 0x71, 0x85, 0xb9, 0xd5, 0x29, 0xf7, 0x5e, 0xf7, 0xfb, 0xd6, + 0x1a, 0x05, 0xb5, 0x4e, 0x34, 0x57, 0xa5, 0x3d, 0x6e, 0xdf, 0x3d, 0xec, 0x97, 0xfe, 0x7d, 0xd8, + 0x6f, 0xde, 0xa2, 0x30, 0x38, 0xec, 0x2e, 0x08, 0x77, 0xdd, 0x1d, 0x5c, 0x5c, 0x2e, 0xa0, 0x0b, + 0xb6, 0x13, 0x36, 0xe4, 0xcc, 0xa7, 0x6c, 0xe4, 0xf1, 0x48, 0x98, 0x65, 0x95, 0xfa, 0x93, 0x95, + 0xa9, 0x53, 0xc7, 0xba, 0x9c, 0x12, 0x7e, 0x8c, 0x8e, 0x2b, 0x59, 
0x3e, 0xf7, 0x4d, 0xf2, 0x1c, + 0x12, 0x10, 0x81, 0xdd, 0x10, 0xc9, 0x24, 0x26, 0xde, 0xbc, 0x74, 0xa5, 0x63, 0xf4, 0x5e, 0xf7, + 0xed, 0x97, 0xa4, 0x7f, 0x50, 0x3c, 0xbf, 0x90, 0x41, 0xb8, 0x30, 0x17, 0x2b, 0xc6, 0xe0, 0xaf, + 0xa0, 0xb5, 0x58, 0x5d, 0x4f, 0x72, 0x6f, 0x4c, 0xe8, 0x68, 0x2c, 0xcd, 0x77, 0xd4, 0x1e, 0xbe, + 0x5e, 0xab, 0x7c, 0x57, 0x73, 0x87, 0x71, 0xc1, 0xbf, 0x57, 0x12, 0x7a, 0x5f, 0xcd, 0x74, 0x29, + 0x0a, 0x7f, 0x37, 0xc0, 0xdb, 0x59, 0x69, 0x91, 0xef, 0x53, 0x49, 0x39, 0xf3, 0xa2, 0x98, 0x47, + 0x5c, 0xa0, 0x40, 0x98, 0x55, 0x65, 0xe0, 0xdb, 0x8d, 0xce, 0xef, 0x48, 0xcb, 0x9c, 0x69, 0x15, + 0x6d, 0x61, 0x0f, 0xaf, 0xc0, 0x05, 0xfc, 0xcd, 0x00, 0xad, 0x99, 0x8b, 0x98, 0x84, 0x3c, 0x45, + 0x41, 0xc1, 0xc4, 0x2b, 0x65, 0xe2, 0x9b, 0x8d, 0x4c, 0xb8, 0xb9, 0xca, 0x82, 0x07, 0x13, 0x2f, + 0x87, 0x05, 0x1c, 0x80, 0x6a, 0x84, 0x62, 0x14, 0x0a, 0xb3, 0xa6, 0x0e, 0xf7, 0xd3, 0xb5, 0xb2, + 0x9d, 0x29, 0x8a, 0x16, 0xd7, 0x02, 0xdd, 0x3f, 0x2b, 0x60, 0x7b, 0xae, 0x97, 0xe1, 0x1e, 0xa8, + 0xe5, 0x42, 0xfa, 0xea, 0xd4, 0xdd, 0x57, 0xea, 0x7f, 0xe0, 0xc3, 0x0f, 0x00, 0xc0, 0x63, 0xc4, + 0x18, 0x09, 0x32, 0x70, 0x4b, 0x81, 0x75, 0x1d, 0x19, 0xf8, 0xf0, 0x2d, 0xa8, 0xe3, 0x80, 0x12, + 0x26, 0x33, 0xb4, 0xac, 0xd0, 0x5a, 0x1e, 0x18, 0xf8, 0xf0, 0x63, 0xb0, 0x43, 0x19, 0x95, 0x14, + 0x05, 0xd3, 0x7e, 0xa9, 0xa8, 0x7b, 0xb9, 0xad, 0xa3, 0xfa, 0x8c, 0xbf, 0x02, 0x7b, 0x01, 0xc7, + 0x93, 0x62, 0x0f, 0x33, 0x4f, 0xd2, 0x90, 0xf0, 0x24, 0xeb, 0x30, 0xa3, 0x57, 0x73, 0x9b, 0xd9, + 0x82, 0xe7, 0xbe, 0x64, 0x17, 0x39, 0x0a, 0x87, 0xa0, 0x31, 0x3b, 0x17, 0x3d, 0x26, 0xcc, 0xaa, + 0xaa, 0x8f, 0xb3, 0xb2, 0x3e, 0xb3, 0x11, 0x94, 0x3a, 0x56, 0x71, 0x90, 0xe8, 0x2a, 0xcd, 0x46, + 0x84, 0xc6, 0xa0, 0x04, 0xcd, 0x88, 0xe4, 0xbe, 0xf4, 0x4d, 0xc8, 0xb6, 0x3f, 0x22, 0xd3, 0x73, + 0xff, 0xf2, 0xa5, 0x6b, 0x76, 0x85, 0x02, 0xea, 0x23, 0xc9, 0xe3, 0x73, 0x22, 0x4f, 0x14, 0xed, + 0x0c, 0xe1, 0x09, 0x91, 0xa7, 0x48, 0x22, 0x9d, 0x70, 0x57, 0xab, 0xe7, 0xf7, 0x23, 0x5f, 0x24, + 0xe0, 0x01, 0x80, 0x22, 0x40, 0x62, 0xec, 0xf9, 0xfc, 0x67, 0x96, 0x15, 0xc3, 0x43, 0x78, 0x62, + 0xd6, 0x3a, 0xe5, 0x5e, 0xdd, 0x6d, 0x28, 0xe4, 0x54, 0x03, 0x47, 0x78, 0x02, 0x27, 0xe0, 0xfd, + 0xb9, 0x09, 0xe0, 0x51, 0xe6, 0x93, 0x1b, 0xb3, 0xae, 0x0c, 0x7e, 0xb1, 0x56, 0xab, 0x14, 0x6e, + 0xfd, 0x20, 0x23, 0x6b, 0x77, 0xef, 0x15, 0x07, 0x8e, 0x02, 0xba, 0x3f, 0x81, 0xc6, 0xe2, 0xe2, + 0x0d, 0x86, 0xf0, 0x01, 0x80, 0x45, 0xab, 0xda, 0x69, 0x36, 0x87, 0x2b, 0x6e, 0x23, 0x59, 0xd0, + 0xed, 0x5e, 0x83, 0xe6, 0xf2, 0xb9, 0xb1, 0x41, 0xc6, 0x26, 0xa8, 0xea, 0xf6, 0xdb, 0x52, 0xb8, + 0xfe, 0x3b, 0xbe, 0xb8, 0x7b, 0x6c, 0x1b, 0xf7, 0x8f, 0x6d, 0xe3, 0x9f, 0xc7, 0xb6, 0xf1, 0xc7, + 0x53, 0xbb, 0x74, 0xff, 0xd4, 0x2e, 0xfd, 0xfd, 0xd4, 0x2e, 0x5d, 0x1f, 0x8e, 0xa8, 0x1c, 0x27, + 0x43, 0x0b, 0xf3, 0xd0, 0xc6, 0x5c, 0x84, 0x5c, 0xd8, 0xcf, 0x25, 0xfc, 0x6c, 0xf6, 0x50, 0xdd, + 0xcc, 0x3f, 0x89, 0xf2, 0x36, 0x22, 0x62, 0x58, 0x55, 0xcf, 0xd4, 0xe7, 0xff, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xb9, 0x12, 0x17, 0x90, 0xd7, 0x07, 0x00, 0x00, } func (m *GenesisState) Marshal() (dAtA []byte, err error) { @@ -194,7 +453,75 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenesis(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0x12 + dAtA[i] = 0x42 + if len(m.ConsumerRemovalProposals) > 0 { + for iNdEx := len(m.ConsumerRemovalProposals) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsumerRemovalProposals[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.ConsumerAdditionProposals) > 0 { + for iNdEx := len(m.ConsumerAdditionProposals) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ConsumerAdditionProposals[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.ValsetUpdateIdToHeight) > 0 { + for iNdEx := len(m.ValsetUpdateIdToHeight) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ValsetUpdateIdToHeight[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.MatureUnbondingOps != nil { + { + size, err := m.MatureUnbondingOps.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.UnbondingOps) > 0 { + for iNdEx := len(m.UnbondingOps) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.UnbondingOps[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } if len(m.ConsumerStates) > 0 { for iNdEx := len(m.ConsumerStates) - 1; iNdEx >= 0; iNdEx-- { { @@ -206,9 +533,14 @@ func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { i = encodeVarintGenesis(dAtA, i, uint64(size)) } i-- - dAtA[i] = 0xa + dAtA[i] = 0x12 } } + if m.ValsetUpdateId != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.ValsetUpdateId)) + i-- + dAtA[i] = 0x8 + } return len(dAtA) - i, nil } @@ -232,6 +564,75 @@ func (m *ConsumerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.UnbondingOpsIndex) > 0 { + for iNdEx := len(m.UnbondingOpsIndex) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.UnbondingOpsIndex[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + if len(m.SlashDowntimeAck) > 0 { + for iNdEx := len(m.SlashDowntimeAck) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.SlashDowntimeAck[iNdEx]) + copy(dAtA[i:], m.SlashDowntimeAck[iNdEx]) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.SlashDowntimeAck[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if len(m.PendingValsetChanges) > 0 { + for iNdEx := len(m.PendingValsetChanges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PendingValsetChanges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + { + size, err := m.ConsumerGenesis.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if m.LockUnbondingOnTimeout { + i-- + if m.LockUnbondingOnTimeout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.InitialHeight != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.InitialHeight)) + i-- + dAtA[i] = 0x20 + } + if len(m.ClientId) > 0 { + i -= len(m.ClientId) + copy(dAtA[i:], m.ClientId) + i = encodeVarintGenesis(dAtA, i, uint64(len(m.ClientId))) + i-- + dAtA[i] = 0x1a + } if len(m.ChannelId) > 0 { i -= len(m.ChannelId) copy(dAtA[i:], m.ChannelId) @@ -249,41 +650,151 @@ func (m *ConsumerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, 
nil } -func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { - offset -= sovGenesis(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ +func (m *UnbondingOpIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - dAtA[offset] = uint8(v) - return base + return dAtA[:n], nil } -func (m *GenesisState) Size() (n int) { - if m == nil { - return 0 - } + +func (m *UnbondingOpIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UnbondingOpIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i var l int _ = l - if len(m.ConsumerStates) > 0 { - for _, e := range m.ConsumerStates { - l = e.Size() - n += 1 + l + sovGenesis(uint64(l)) + if len(m.UnbondingOpIndex) > 0 { + dAtA5 := make([]byte, len(m.UnbondingOpIndex)*10) + var j4 int + for _, num := range m.UnbondingOpIndex { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintGenesis(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0x12 } - l = m.Params.Size() - n += 1 + l + sovGenesis(uint64(l)) - return n + if m.ValsetUpdateId != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.ValsetUpdateId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil } -func (m *ConsumerState) Size() (n int) { - if m == nil { - return 0 +func (m *ValsetUpdateIdToHeight) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err } - var l int - _ = l - l = len(m.ChainId) + return dAtA[:n], nil +} + +func (m *ValsetUpdateIdToHeight) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValsetUpdateIdToHeight) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x10 + } + if m.ValsetUpdateId != 0 { + i = encodeVarintGenesis(dAtA, i, uint64(m.ValsetUpdateId)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValsetUpdateId != 0 { + n += 1 + sovGenesis(uint64(m.ValsetUpdateId)) + } + if len(m.ConsumerStates) > 0 { + for _, e := range m.ConsumerStates { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.UnbondingOps) > 0 { + for _, e := range m.UnbondingOps { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if m.MatureUnbondingOps != nil { + l = m.MatureUnbondingOps.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + if len(m.ValsetUpdateIdToHeight) > 0 { + for _, e := range m.ValsetUpdateIdToHeight { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.ConsumerAdditionProposals) > 0 { + for _, e := range m.ConsumerAdditionProposals { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.ConsumerRemovalProposals) > 0 { + for _, e := range m.ConsumerRemovalProposals { + l = e.Size() + n += 1 
+ l + sovGenesis(uint64(l)) + } + } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func (m *ConsumerState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ChainId) if l > 0 { n += 1 + l + sovGenesis(uint64(l)) } @@ -291,6 +802,70 @@ func (m *ConsumerState) Size() (n int) { if l > 0 { n += 1 + l + sovGenesis(uint64(l)) } + l = len(m.ClientId) + if l > 0 { + n += 1 + l + sovGenesis(uint64(l)) + } + if m.InitialHeight != 0 { + n += 1 + sovGenesis(uint64(m.InitialHeight)) + } + if m.LockUnbondingOnTimeout { + n += 2 + } + l = m.ConsumerGenesis.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.PendingValsetChanges) > 0 { + for _, e := range m.PendingValsetChanges { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.SlashDowntimeAck) > 0 { + for _, s := range m.SlashDowntimeAck { + l = len(s) + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.UnbondingOpsIndex) > 0 { + for _, e := range m.UnbondingOpsIndex { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func (m *UnbondingOpIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValsetUpdateId != 0 { + n += 1 + sovGenesis(uint64(m.ValsetUpdateId)) + } + if len(m.UnbondingOpIndex) > 0 { + l = 0 + for _, e := range m.UnbondingOpIndex { + l += sovGenesis(uint64(e)) + } + n += 1 + sovGenesis(uint64(l)) + l + } + return n +} + +func (m *ValsetUpdateIdToHeight) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ValsetUpdateId != 0 { + n += 1 + sovGenesis(uint64(m.ValsetUpdateId)) + } + if m.Height != 0 { + n += 1 + sovGenesis(uint64(m.Height)) + } return n } @@ -330,6 +905,25 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateId", wireType) + } + m.ValsetUpdateId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field ConsumerStates", wireType) } @@ -363,9 +957,9 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field UnbondingOps", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -392,65 +986,86 @@ func (m *GenesisState) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.UnbondingOps = append(m.UnbondingOps, types.UnbondingOp{}) + if err := m.UnbondingOps[len(m.UnbondingOps)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenesis(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatureUnbondingOps", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + if msglen < 0 { return ErrInvalidLengthGenesis } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConsumerState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenesis + if m.MatureUnbondingOps == nil { + m.MatureUnbondingOps = &types.MaturedUnbondingOps{} } - if iNdEx >= l { + if err := m.MatureUnbondingOps.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateIdToHeight", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.ValsetUpdateIdToHeight = append(m.ValsetUpdateIdToHeight, ValsetUpdateIdToHeight{}) + if err := m.ValsetUpdateIdToHeight[len(m.ValsetUpdateIdToHeight)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConsumerState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConsumerState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerAdditionProposals", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenesis @@ -460,29 +1075,31 @@ func (m *ConsumerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenesis } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenesis } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChainId = string(dAtA[iNdEx:postIndex]) + m.ConsumerAdditionProposals = append(m.ConsumerAdditionProposals, ConsumerAdditionProposal{}) + if err := m.ConsumerAdditionProposals[len(m.ConsumerAdditionProposals)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerRemovalProposals", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenesis @@ -492,24 +1109,610 @@ func (m *ConsumerState) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenesis } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLengthGenesis } if postIndex > l { return io.ErrUnexpectedEOF } - m.ChannelId = string(dAtA[iNdEx:postIndex]) + m.ConsumerRemovalProposals = append(m.ConsumerRemovalProposals, ConsumerRemovalProposal{}) + if err := m.ConsumerRemovalProposals[len(m.ConsumerRemovalProposals)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConsumerState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConsumerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConsumerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChainId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChainId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ChannelId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex 
< 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ChannelId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientId", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientId = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialHeight", wireType) + } + m.InitialHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialHeight |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LockUnbondingOnTimeout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LockUnbondingOnTimeout = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsumerGenesis", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsumerGenesis.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PendingValsetChanges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PendingValsetChanges = append(m.PendingValsetChanges, types.ValidatorSetChangePacketData{}) + if err := m.PendingValsetChanges[len(m.PendingValsetChanges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SlashDowntimeAck", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SlashDowntimeAck = append(m.SlashDowntimeAck, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnbondingOpsIndex", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UnbondingOpsIndex = append(m.UnbondingOpsIndex, UnbondingOpIndex{}) + if err := m.UnbondingOpsIndex[len(m.UnbondingOpsIndex)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UnbondingOpIndex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnbondingOpIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnbondingOpIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateId", wireType) + } + m.ValsetUpdateId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UnbondingOpIndex = append(m.UnbondingOpIndex, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if 
elementCount != 0 && len(m.UnbondingOpIndex) == 0 { + m.UnbondingOpIndex = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.UnbondingOpIndex = append(m.UnbondingOpIndex, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field UnbondingOpIndex", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValsetUpdateIdToHeight) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValsetUpdateIdToHeight: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValsetUpdateIdToHeight: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ValsetUpdateId", wireType) + } + m.ValsetUpdateId = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ValsetUpdateId |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipGenesis(dAtA[iNdEx:]) diff --git a/x/ccv/provider/types/genesis_test.go b/x/ccv/provider/types/genesis_test.go index d6cf64926d..e1a7556bdc 100644 --- a/x/ccv/provider/types/genesis_test.go +++ b/x/ccv/provider/types/genesis_test.go @@ -12,6 +12,7 @@ import ( "github.com/stretchr/testify/require" ) +// Tests validation of consumer states and params within a provider genesis state func TestValidateGenesisState(t *testing.T) { testCases := []struct { name string @@ -21,7 +22,13 @@ func TestValidateGenesisState(t *testing.T) { { "valid initializing provider genesis with nil updates", types.NewGenesisState( - []types.ConsumerState{{"chainid-1", "channelid"}}, + 0, + nil, + []types.ConsumerState{{ChainId: "chainid-1", ChannelId: "channelid"}}, + nil, + nil, + nil, + nil, types.DefaultParams(), ), true, @@ -29,7 +36,13 @@ func TestValidateGenesisState(t *testing.T) { { "valid validating provider genesis with nil updates", types.NewGenesisState( - []types.ConsumerState{{"chainid-1", "channelid"}}, + 0, + nil, + []types.ConsumerState{{ChainId: "chainid-1", ChannelId: "channelid"}}, + nil, + nil, + nil, + nil, types.DefaultParams(), ), 
true, @@ -37,12 +50,18 @@ func TestValidateGenesisState(t *testing.T) { { "valid multiple provider genesis with multiple consumer chains", types.NewGenesisState( + 0, + nil, []types.ConsumerState{ - {"chainid-1", "channelid"}, - {"chainid-2", "channelid2"}, - {"chainid-3", "channelid3"}, - {"chainid-4", "channelid4"}, + {ChainId: "chainid-1", ChannelId: "channelid1"}, + {ChainId: "chainid-2", ChannelId: "channelid2"}, + {ChainId: "chainid-3", ChannelId: "channelid3"}, + {ChainId: "chainid-4", ChannelId: "channelid4"}, }, + nil, + nil, + nil, + nil, types.DefaultParams(), ), true, @@ -50,7 +69,13 @@ func TestValidateGenesisState(t *testing.T) { { "valid provider genesis with custom params", types.NewGenesisState( - []types.ConsumerState{{"chainid-1", "channelid"}}, + 0, + nil, + []types.ConsumerState{{ChainId: "chainid-1", ChannelId: "channelid"}}, + nil, + nil, + nil, + nil, types.NewParams(ibctmtypes.NewClientState("", ibctmtypes.DefaultTrustLevel, 0, 0, time.Second*40, clienttypes.Height{}, commitmenttypes.GetSDKSpecs(), []string{"ibc", "upgradedIBCState"}, true, false)), ), @@ -59,7 +84,13 @@ func TestValidateGenesisState(t *testing.T) { { "invalid params", types.NewGenesisState( - []types.ConsumerState{{"chainid-1", "channelid"}}, + 0, + nil, + []types.ConsumerState{{ChainId: "chainid-1", ChannelId: "channelid"}}, + nil, + nil, + nil, + nil, types.NewParams(ibctmtypes.NewClientState("", ibctmtypes.DefaultTrustLevel, 0, 0, 0, clienttypes.Height{}, nil, []string{"ibc", "upgradedIBCState"}, true, false)), ), @@ -68,7 +99,13 @@ func TestValidateGenesisState(t *testing.T) { { "invalid chain id", types.NewGenesisState( - []types.ConsumerState{{" ", "channelid"}}, + 0, + nil, + []types.ConsumerState{{ChainId: "", ChannelId: "channelid"}}, + nil, + nil, + nil, + nil, types.DefaultParams(), ), false, @@ -76,7 +113,13 @@ func TestValidateGenesisState(t *testing.T) { { "invalid channel id", types.NewGenesisState( - []types.ConsumerState{{"chainid", "invalidchannel{}"}}, + 0, + nil, + []types.ConsumerState{{ChainId: "chainid", ChannelId: "ivnalidChannel{}"}}, + nil, + nil, + nil, + nil, types.DefaultParams(), ), false, diff --git a/x/ccv/provider/types/keys.go b/x/ccv/provider/types/keys.go index 68a0e24d65..338921f33c 100644 --- a/x/ccv/provider/types/keys.go +++ b/x/ccv/provider/types/keys.go @@ -52,13 +52,13 @@ const ( // ChainToClientBytePrefix is the byte prefix for storing the consumer chainID for a given consumer clientid. ChainToClientBytePrefix - // PendingCreateProposalBytePrefix is the byte prefix for storing the pending identified consumer chain client before the spawn time occurs. + // PendingCAPBytePrefix is the byte prefix for storing pending consumer addition proposals before the spawn time occurs. // The key includes the BigEndian timestamp to allow for efficient chronological iteration - PendingCreateProposalBytePrefix + PendingCAPBytePrefix - // PendingStopProposalBytePrefix is the byte prefix for storing the pending identified consumer chain before the stop time occurs. + // PendingCRPBytePrefix is the byte prefix for storing pending consumer removal proposals before the stop time occurs. // The key includes the BigEndian timestamp to allow for efficient chronological iteration - PendingStopProposalBytePrefix + PendingCRPBytePrefix // UnbondingOpBytePrefix is the byte prefix that stores a record of all the ids of consumer chains that // need to unbond before a given delegation can unbond on this chain. 
@@ -123,15 +123,15 @@ func ChainToClientKey(chainID string) []byte { return append([]byte{ChainToClientBytePrefix}, []byte(chainID)...) } -// PendingCreateProposalKey returns the key under which a pending identified client is stored -func PendingCreateProposalKey(timestamp time.Time, chainID string) []byte { +// PendingCAPKey returns the key under which a pending consumer addition proposal is stored +func PendingCAPKey(timestamp time.Time, chainID string) []byte { timeBz := sdk.FormatTimeBytes(timestamp) timeBzL := len(timeBz) - prefixL := len([]byte{PendingCreateProposalBytePrefix}) + prefixL := len([]byte{PendingCAPBytePrefix}) bz := make([]byte, prefixL+8+timeBzL+len(chainID)) // copy the prefix - copy(bz[:prefixL], []byte{PendingCreateProposalBytePrefix}) + copy(bz[:prefixL], []byte{PendingCAPBytePrefix}) // copy the time length copy(bz[prefixL:prefixL+8], sdk.Uint64ToBigEndian(uint64(timeBzL))) // copy the time bytes @@ -141,9 +141,10 @@ func PendingCreateProposalKey(timestamp time.Time, chainID string) []byte { return bz } -// ParsePendingCreateProposalKey returns the time and chain ID for a pending client key or an error if unparseable -func ParsePendingCreateProposalKey(bz []byte) (time.Time, string, error) { - expectedPrefix := []byte{PendingCreateProposalBytePrefix} +// ParsePendingCAPKey returns the time and chain ID for a pending consumer addition proposal key +// or an error if unparsable +func ParsePendingCAPKey(bz []byte) (time.Time, string, error) { + expectedPrefix := []byte{PendingCAPBytePrefix} prefixL := len(expectedPrefix) if prefix := bz[:prefixL]; !bytes.Equal(prefix, expectedPrefix) { return time.Time{}, "", fmt.Errorf("invalid prefix; expected: %X, got: %X", expectedPrefix, prefix) @@ -159,15 +160,15 @@ func ParsePendingCreateProposalKey(bz []byte) (time.Time, string, error) { return timestamp, chainID, nil } -// PendingStopProposalKey returns the key under which pending consumer chain stop proposals are stored -func PendingStopProposalKey(timestamp time.Time, chainID string) []byte { +// PendingCRPKey returns the key under which pending consumer removal proposals are stored +func PendingCRPKey(timestamp time.Time, chainID string) []byte { timeBz := sdk.FormatTimeBytes(timestamp) timeBzL := len(timeBz) - prefixL := len([]byte{PendingStopProposalBytePrefix}) + prefixL := len([]byte{PendingCRPBytePrefix}) bz := make([]byte, prefixL+8+timeBzL+len(chainID)) // copy the prefix - copy(bz[:prefixL], []byte{PendingStopProposalBytePrefix}) + copy(bz[:prefixL], []byte{PendingCRPBytePrefix}) // copy the time length copy(bz[prefixL:prefixL+8], sdk.Uint64ToBigEndian(uint64(timeBzL))) // copy the time bytes @@ -177,9 +178,9 @@ func PendingStopProposalKey(timestamp time.Time, chainID string) []byte { return bz } -// ParsePendingStopProposalKey returns the time and chain ID for a pending consumer chain stop proposal key or an error if unparseable -func ParsePendingStopProposalKey(bz []byte) (time.Time, string, error) { - expectedPrefix := []byte{PendingStopProposalBytePrefix} +// ParsePendingCRPKey returns the time and chain ID for a pending consumer removal proposal key or an error if unparseable +func ParsePendingCRPKey(bz []byte) (time.Time, string, error) { + expectedPrefix := []byte{PendingCRPBytePrefix} prefixL := len(expectedPrefix) if prefix := bz[:prefixL]; !bytes.Equal(prefix, expectedPrefix) { return time.Time{}, "", fmt.Errorf("invalid prefix; expected: %X, got: %X", expectedPrefix, prefix) diff --git a/x/ccv/provider/types/keys_test.go 
b/x/ccv/provider/types/keys_test.go index 6306f4075e..9faca25676 100644 --- a/x/ccv/provider/types/keys_test.go +++ b/x/ccv/provider/types/keys_test.go @@ -43,8 +43,8 @@ func getSingleByteKeys() [][]byte { keys[i], i = []byte{ChainToChannelBytePrefix}, i+1 keys[i], i = []byte{ChannelToChainBytePrefix}, i+1 keys[i], i = []byte{ChainToClientBytePrefix}, i+1 - keys[i], i = []byte{PendingCreateProposalBytePrefix}, i+1 - keys[i], i = []byte{PendingStopProposalBytePrefix}, i+1 + keys[i], i = []byte{PendingCAPBytePrefix}, i+1 + keys[i], i = []byte{PendingCRPBytePrefix}, i+1 keys[i], i = []byte{UnbondingOpBytePrefix}, i+1 keys[i], i = []byte{UnbondingOpIndexBytePrefix}, i+1 keys[i], i = []byte{ValsetUpdateBlockHeightBytePrefix}, i+1 @@ -57,7 +57,8 @@ func getSingleByteKeys() [][]byte { return keys } -func TestPendingClientKeyAndParse(t *testing.T) { +// Tests the construction and parsing of keys for storing pending consumer addition proposals +func TestPendingCAPKeyAndParse(t *testing.T) { tests := []struct { timestamp time.Time chainID string @@ -69,19 +70,20 @@ func TestPendingClientKeyAndParse(t *testing.T) { } for _, test := range tests { - key := PendingCreateProposalKey(test.timestamp, test.chainID) + key := PendingCAPKey(test.timestamp, test.chainID) require.NotEmpty(t, key) // Expected bytes = prefix + time length + time bytes + length of chainID expectedBytes := 1 + 8 + len(sdk.FormatTimeBytes(time.Time{})) + len(test.chainID) require.Equal(t, expectedBytes, len(key)) - parsedTime, parsedID, err := ParsePendingCreateProposalKey(key) + parsedTime, parsedID, err := ParsePendingCAPKey(key) require.Equal(t, test.timestamp.UTC(), parsedTime.UTC()) require.Equal(t, test.chainID, parsedID) require.NoError(t, err) } } -func TestPendingStopProposalKeyAndParse(t *testing.T) { +// Tests the construction and parsing of keys for storing pending consumer removal proposals +func TestPendingCRPKeyAndParse(t *testing.T) { tests := []struct { timestamp time.Time chainID string @@ -93,12 +95,12 @@ func TestPendingStopProposalKeyAndParse(t *testing.T) { } for _, test := range tests { - key := PendingStopProposalKey(test.timestamp, test.chainID) + key := PendingCRPKey(test.timestamp, test.chainID) require.NotEmpty(t, key) // Expected bytes = prefix + time length + time bytes + length of chainID expectedBytes := 1 + 8 + len(sdk.FormatTimeBytes(time.Time{})) + len(test.chainID) require.Equal(t, expectedBytes, len(key)) - parsedTime, parsedID, err := ParsePendingStopProposalKey(key) + parsedTime, parsedID, err := ParsePendingCRPKey(key) require.Equal(t, test.timestamp.UTC(), parsedTime.UTC()) require.Equal(t, test.chainID, parsedID) require.NoError(t, err) diff --git a/x/ccv/provider/types/proposal.go b/x/ccv/provider/types/proposal.go index 88c593df1b..7462115b9a 100644 --- a/x/ccv/provider/types/proposal.go +++ b/x/ccv/provider/types/proposal.go @@ -11,21 +11,21 @@ import ( ) const ( - ProposalTypeCreateConsumerChain = "CreateConsumerChain" - ProposalTypeStopConsumerChain = "StopConsumerChain" + ProposalTypeConsumerAddition = "ConsumerAddition" + ProposalTypeConsumerRemoval = "ConsumerRemoval" ) var ( - _ govtypes.Content = &CreateConsumerChainProposal{} + _ govtypes.Content = &ConsumerAdditionProposal{} ) func init() { - govtypes.RegisterProposalType(ProposalTypeCreateConsumerChain) + govtypes.RegisterProposalType(ProposalTypeConsumerAddition) } -// NewCreateConsumerChainProposal creates a new create consumerchain proposal. 
-func NewCreateConsumerChainProposal(title, description, chainID string, initialHeight clienttypes.Height, genesisHash, binaryHash []byte, spawnTime time.Time) govtypes.Content { - return &CreateConsumerChainProposal{ +// NewConsumerAdditionProposal creates a new consumer addition proposal. +func NewConsumerAdditionProposal(title, description, chainID string, initialHeight clienttypes.Height, genesisHash, binaryHash []byte, spawnTime time.Time) govtypes.Content { + return &ConsumerAdditionProposal{ Title: title, Description: description, ChainId: chainID, @@ -36,49 +36,49 @@ func NewCreateConsumerChainProposal(title, description, chainID string, initialH } } -// GetTitle returns the title of a create consumerchain proposal. -func (cccp *CreateConsumerChainProposal) GetTitle() string { return cccp.Title } +// GetTitle returns the title of a consumer addition proposal. +func (cccp *ConsumerAdditionProposal) GetTitle() string { return cccp.Title } -// GetDescription returns the description of a create consumerchain proposal. -func (cccp *CreateConsumerChainProposal) GetDescription() string { return cccp.Description } +// GetDescription returns the description of a consumer addition proposal. +func (cccp *ConsumerAdditionProposal) GetDescription() string { return cccp.Description } -// ProposalRoute returns the routing key of a create consumerchain proposal. -func (cccp *CreateConsumerChainProposal) ProposalRoute() string { return RouterKey } +// ProposalRoute returns the routing key of a consumer addition proposal. +func (cccp *ConsumerAdditionProposal) ProposalRoute() string { return RouterKey } -// ProposalType returns the type of a create consumerchain proposal. -func (cccp *CreateConsumerChainProposal) ProposalType() string { - return ProposalTypeCreateConsumerChain +// ProposalType returns the type of a consumer addition proposal. +func (cccp *ConsumerAdditionProposal) ProposalType() string { + return ProposalTypeConsumerAddition } // ValidateBasic runs basic stateless validity checks -func (cccp *CreateConsumerChainProposal) ValidateBasic() error { +func (cccp *ConsumerAdditionProposal) ValidateBasic() error { if err := govtypes.ValidateAbstract(cccp); err != nil { return err } if strings.TrimSpace(cccp.ChainId) == "" { - return sdkerrors.Wrap(ErrInvalidCreateProposal, "consumer chain id must not be blank") + return sdkerrors.Wrap(ErrInvalidConsumerAdditionProposal, "consumer chain id must not be blank") } if cccp.InitialHeight.IsZero() { - return sdkerrors.Wrap(ErrInvalidCreateProposal, "initial height cannot be zero") + return sdkerrors.Wrap(ErrInvalidConsumerAdditionProposal, "initial height cannot be zero") } if len(cccp.GenesisHash) == 0 { - return sdkerrors.Wrap(ErrInvalidCreateProposal, "genesis hash cannot be empty") + return sdkerrors.Wrap(ErrInvalidConsumerAdditionProposal, "genesis hash cannot be empty") } if len(cccp.BinaryHash) == 0 { - return sdkerrors.Wrap(ErrInvalidCreateProposal, "binary hash cannot be empty") + return sdkerrors.Wrap(ErrInvalidConsumerAdditionProposal, "binary hash cannot be empty") } if cccp.SpawnTime.IsZero() { - return sdkerrors.Wrap(ErrInvalidCreateProposal, "spawn time cannot be zero") + return sdkerrors.Wrap(ErrInvalidConsumerAdditionProposal, "spawn time cannot be zero") } return nil } -// String returns the string representation of the CreateConsumerChainProposal. -func (cccp *CreateConsumerChainProposal) String() string { +// String returns the string representation of the ConsumerAdditionProposal. 
+func (cccp *ConsumerAdditionProposal) String() string { return fmt.Sprintf(`CreateConsumerChain Proposal Title: %s Description: %s @@ -89,34 +89,34 @@ func (cccp *CreateConsumerChainProposal) String() string { SpawnTime: %s`, cccp.Title, cccp.Description, cccp.ChainId, cccp.InitialHeight, cccp.GenesisHash, cccp.BinaryHash, cccp.SpawnTime) } -// NewStopConsumerChainProposal creates a new stop consumer chain proposal. -func NewStopConsumerChainProposal(title, description, chainID string, stopTime time.Time) (govtypes.Content, error) { - return &StopConsumerChainProposal{ +// NewConsumerRemovalProposal creates a new consumer removal proposal. +func NewConsumerRemovalProposal(title, description, chainID string, stopTime time.Time) govtypes.Content { + return &ConsumerRemovalProposal{ Title: title, Description: description, ChainId: chainID, StopTime: stopTime, - }, nil + } } -// ProposalRoute returns the routing key of a stop consumer chain proposal. -func (sccp *StopConsumerChainProposal) ProposalRoute() string { return RouterKey } +// ProposalRoute returns the routing key of a consumer removal proposal. +func (sccp *ConsumerRemovalProposal) ProposalRoute() string { return RouterKey } -// ProposalType returns the type of a stop consumer chain proposal. -func (sccp *StopConsumerChainProposal) ProposalType() string { return ProposalTypeStopConsumerChain } +// ProposalType returns the type of a consumer removal proposal. +func (sccp *ConsumerRemovalProposal) ProposalType() string { return ProposalTypeConsumerRemoval } // ValidateBasic runs basic stateless validity checks -func (sccp *StopConsumerChainProposal) ValidateBasic() error { +func (sccp *ConsumerRemovalProposal) ValidateBasic() error { if err := govtypes.ValidateAbstract(sccp); err != nil { return err } if strings.TrimSpace(sccp.ChainId) == "" { - return sdkerrors.Wrap(ErrInvalidStopProposal, "consumer chain id must not be blank") + return sdkerrors.Wrap(ErrInvalidConsumerRemovalProp, "consumer chain id must not be blank") } if sccp.StopTime.IsZero() { - return sdkerrors.Wrap(ErrInvalidStopProposal, "spawn time cannot be zero") + return sdkerrors.Wrap(ErrInvalidConsumerRemovalProp, "spawn time cannot be zero") } return nil } diff --git a/x/ccv/provider/types/proposal_test.go b/x/ccv/provider/types/proposal_test.go index 9bc193bd84..18094d4375 100644 --- a/x/ccv/provider/types/proposal_test.go +++ b/x/ccv/provider/types/proposal_test.go @@ -27,22 +27,22 @@ func TestValidateBasic(t *testing.T) { }{ { "success", - types.NewCreateConsumerChainProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()), + types.NewConsumerAdditionProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()), true, }, { "fails validate abstract - empty title", - types.NewCreateConsumerChainProposal(" ", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()), + types.NewConsumerAdditionProposal(" ", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()), false, }, { "chainID is empty", - types.NewCreateConsumerChainProposal("title", "description", " ", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()), + types.NewConsumerAdditionProposal("title", "description", " ", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Now()), false, }, { "initial height is zero", - &types.CreateConsumerChainProposal{ + &types.ConsumerAdditionProposal{ Title: "title", Description: 
"description", ChainId: "chainID", @@ -55,17 +55,17 @@ func TestValidateBasic(t *testing.T) { }, { "genesis hash is empty", - types.NewCreateConsumerChainProposal("title", "description", "chainID", initialHeight, []byte(""), []byte("bin_hash"), time.Now()), + types.NewConsumerAdditionProposal("title", "description", "chainID", initialHeight, []byte(""), []byte("bin_hash"), time.Now()), false, }, { "binary hash is empty", - types.NewCreateConsumerChainProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte(""), time.Now()), + types.NewConsumerAdditionProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte(""), time.Now()), false, }, { "time is zero", - types.NewCreateConsumerChainProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Time{}), + types.NewConsumerAdditionProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), time.Time{}), false, }, } @@ -81,10 +81,10 @@ func TestValidateBasic(t *testing.T) { } } -func TestMarshalCreateConsumerChainProposal(t *testing.T) { - content := types.NewCreateConsumerChainProposal("title", "description", "chainID", clienttypes.NewHeight(0, 1), []byte("gen_hash"), []byte("bin_hash"), time.Now().UTC()) +func TestMarshalConsumerAdditionProposal(t *testing.T) { + content := types.NewConsumerAdditionProposal("title", "description", "chainID", clienttypes.NewHeight(0, 1), []byte("gen_hash"), []byte("bin_hash"), time.Now().UTC()) - cccp, ok := content.(*types.CreateConsumerChainProposal) + cccp, ok := content.(*types.ConsumerAdditionProposal) require.True(t, ok) // create codec @@ -100,17 +100,17 @@ func TestMarshalCreateConsumerChainProposal(t *testing.T) { require.NoError(t, err) // unmarshal proposal - newCccp := &types.CreateConsumerChainProposal{} + newCccp := &types.ConsumerAdditionProposal{} err = cdc.UnmarshalJSON(bz, newCccp) require.NoError(t, err) require.True(t, proto.Equal(cccp, newCccp), "unmarshalled proposal does not equal original proposal") } -func TestCreateConsumerChainProposalString(t *testing.T) { +func TestConsumerAdditionProposalString(t *testing.T) { initialHeight := clienttypes.NewHeight(2, 3) spawnTime := time.Now() - proposal := types.NewCreateConsumerChainProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), spawnTime) + proposal := types.NewConsumerAdditionProposal("title", "description", "chainID", initialHeight, []byte("gen_hash"), []byte("bin_hash"), spawnTime) expect := fmt.Sprintf(`CreateConsumerChain Proposal Title: title @@ -121,5 +121,5 @@ func TestCreateConsumerChainProposalString(t *testing.T) { BinaryHash: %s SpawnTime: %s`, initialHeight, []byte("gen_hash"), []byte("bin_hash"), spawnTime) - require.Equal(t, expect, proposal.String(), "string method for CreateConsumerChainProposal returned unexpected string") + require.Equal(t, expect, proposal.String(), "string method for ConsumerAdditionProposal returned unexpected string") } diff --git a/x/ccv/provider/types/provider.pb.go b/x/ccv/provider/types/provider.pb.go index 87d5250359..df9f5228ab 100644 --- a/x/ccv/provider/types/provider.pb.go +++ b/x/ccv/provider/types/provider.pb.go @@ -29,10 +29,10 @@ var _ = time.Kitchen // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// CreateConsumerChainProposal is a governance proposal on the provider chain to spawn a new consumer chain. 
+// ConsumerAdditionProposal is a governance proposal on the provider chain to spawn a new consumer chain. // If it passes, then all validators on the provider chain are expected to validate the consumer chain at spawn time // or get slashed. It is recommended that spawn time occurs after the proposal end time. -type CreateConsumerChainProposal struct { +type ConsumerAdditionProposal struct { // the title of the proposal Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` // the description of the proposal @@ -56,17 +56,17 @@ type CreateConsumerChainProposal struct { LockUnbondingOnTimeout bool `protobuf:"varint,8,opt,name=lock_unbonding_on_timeout,json=lockUnbondingOnTimeout,proto3" json:"lock_unbonding_on_timeout,omitempty"` } -func (m *CreateConsumerChainProposal) Reset() { *m = CreateConsumerChainProposal{} } -func (*CreateConsumerChainProposal) ProtoMessage() {} -func (*CreateConsumerChainProposal) Descriptor() ([]byte, []int) { +func (m *ConsumerAdditionProposal) Reset() { *m = ConsumerAdditionProposal{} } +func (*ConsumerAdditionProposal) ProtoMessage() {} +func (*ConsumerAdditionProposal) Descriptor() ([]byte, []int) { return fileDescriptor_f22ec409a72b7b72, []int{0} } -func (m *CreateConsumerChainProposal) XXX_Unmarshal(b []byte) error { +func (m *ConsumerAdditionProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *CreateConsumerChainProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ConsumerAdditionProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_CreateConsumerChainProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_ConsumerAdditionProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -76,22 +76,22 @@ func (m *CreateConsumerChainProposal) XXX_Marshal(b []byte, deterministic bool) return b[:n], nil } } -func (m *CreateConsumerChainProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateConsumerChainProposal.Merge(m, src) +func (m *ConsumerAdditionProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsumerAdditionProposal.Merge(m, src) } -func (m *CreateConsumerChainProposal) XXX_Size() int { +func (m *ConsumerAdditionProposal) XXX_Size() int { return m.Size() } -func (m *CreateConsumerChainProposal) XXX_DiscardUnknown() { - xxx_messageInfo_CreateConsumerChainProposal.DiscardUnknown(m) +func (m *ConsumerAdditionProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ConsumerAdditionProposal.DiscardUnknown(m) } -var xxx_messageInfo_CreateConsumerChainProposal proto.InternalMessageInfo +var xxx_messageInfo_ConsumerAdditionProposal proto.InternalMessageInfo -// StopConsumerProposal is a governance proposal on the provider chain to stop a consumer chain. +// ConsumerRemovalProposal is a governance proposal on the provider chain to remove (and stop) a consumer chain. // If it passes, all the consumer chain's state is removed from the provider chain. The outstanding unbonding // operation funds are released if the LockUnbondingOnTimeout parameter is set to false for the consumer chain ID. 
-type StopConsumerChainProposal struct { +type ConsumerRemovalProposal struct { // the title of the proposal Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` // the description of the proposal @@ -102,18 +102,18 @@ type StopConsumerChainProposal struct { StopTime time.Time `protobuf:"bytes,4,opt,name=stop_time,json=stopTime,proto3,stdtime" json:"stop_time"` } -func (m *StopConsumerChainProposal) Reset() { *m = StopConsumerChainProposal{} } -func (m *StopConsumerChainProposal) String() string { return proto.CompactTextString(m) } -func (*StopConsumerChainProposal) ProtoMessage() {} -func (*StopConsumerChainProposal) Descriptor() ([]byte, []int) { +func (m *ConsumerRemovalProposal) Reset() { *m = ConsumerRemovalProposal{} } +func (m *ConsumerRemovalProposal) String() string { return proto.CompactTextString(m) } +func (*ConsumerRemovalProposal) ProtoMessage() {} +func (*ConsumerRemovalProposal) Descriptor() ([]byte, []int) { return fileDescriptor_f22ec409a72b7b72, []int{1} } -func (m *StopConsumerChainProposal) XXX_Unmarshal(b []byte) error { +func (m *ConsumerRemovalProposal) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *StopConsumerChainProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *ConsumerRemovalProposal) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_StopConsumerChainProposal.Marshal(b, m, deterministic) + return xxx_messageInfo_ConsumerRemovalProposal.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -123,40 +123,40 @@ func (m *StopConsumerChainProposal) XXX_Marshal(b []byte, deterministic bool) ([ return b[:n], nil } } -func (m *StopConsumerChainProposal) XXX_Merge(src proto.Message) { - xxx_messageInfo_StopConsumerChainProposal.Merge(m, src) +func (m *ConsumerRemovalProposal) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConsumerRemovalProposal.Merge(m, src) } -func (m *StopConsumerChainProposal) XXX_Size() int { +func (m *ConsumerRemovalProposal) XXX_Size() int { return m.Size() } -func (m *StopConsumerChainProposal) XXX_DiscardUnknown() { - xxx_messageInfo_StopConsumerChainProposal.DiscardUnknown(m) +func (m *ConsumerRemovalProposal) XXX_DiscardUnknown() { + xxx_messageInfo_ConsumerRemovalProposal.DiscardUnknown(m) } -var xxx_messageInfo_StopConsumerChainProposal proto.InternalMessageInfo +var xxx_messageInfo_ConsumerRemovalProposal proto.InternalMessageInfo -func (m *StopConsumerChainProposal) GetTitle() string { +func (m *ConsumerRemovalProposal) GetTitle() string { if m != nil { return m.Title } return "" } -func (m *StopConsumerChainProposal) GetDescription() string { +func (m *ConsumerRemovalProposal) GetDescription() string { if m != nil { return m.Description } return "" } -func (m *StopConsumerChainProposal) GetChainId() string { +func (m *ConsumerRemovalProposal) GetChainId() string { if m != nil { return m.ChainId } return "" } -func (m *StopConsumerChainProposal) GetStopTime() time.Time { +func (m *ConsumerRemovalProposal) GetStopTime() time.Time { if m != nil { return m.StopTime } @@ -260,11 +260,58 @@ func (m *HandshakeMetadata) GetVersion() string { return "" } +// SlashAcks contains addresses of consumer chain validators +// successfully slashed on the provider chain +type SlashAcks struct { + Addresses []string `protobuf:"bytes,1,rep,name=addresses,proto3" json:"addresses,omitempty"` +} + +func (m *SlashAcks) Reset() { *m = SlashAcks{} } +func (m *SlashAcks) String() string { return
proto.CompactTextString(m) } +func (*SlashAcks) ProtoMessage() {} +func (*SlashAcks) Descriptor() ([]byte, []int) { + return fileDescriptor_f22ec409a72b7b72, []int{4} +} +func (m *SlashAcks) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SlashAcks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SlashAcks.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SlashAcks) XXX_Merge(src proto.Message) { + xxx_messageInfo_SlashAcks.Merge(m, src) +} +func (m *SlashAcks) XXX_Size() int { + return m.Size() +} +func (m *SlashAcks) XXX_DiscardUnknown() { + xxx_messageInfo_SlashAcks.DiscardUnknown(m) +} + +var xxx_messageInfo_SlashAcks proto.InternalMessageInfo + +func (m *SlashAcks) GetAddresses() []string { + if m != nil { + return m.Addresses + } + return nil +} + func init() { - proto.RegisterType((*CreateConsumerChainProposal)(nil), "interchain_security.ccv.provider.v1.CreateConsumerChainProposal") - proto.RegisterType((*StopConsumerChainProposal)(nil), "interchain_security.ccv.provider.v1.StopConsumerChainProposal") + proto.RegisterType((*ConsumerAdditionProposal)(nil), "interchain_security.ccv.provider.v1.ConsumerAdditionProposal") + proto.RegisterType((*ConsumerRemovalProposal)(nil), "interchain_security.ccv.provider.v1.ConsumerRemovalProposal") proto.RegisterType((*Params)(nil), "interchain_security.ccv.provider.v1.Params") proto.RegisterType((*HandshakeMetadata)(nil), "interchain_security.ccv.provider.v1.HandshakeMetadata") + proto.RegisterType((*SlashAcks)(nil), "interchain_security.ccv.provider.v1.SlashAcks") } func init() { @@ -272,49 +319,50 @@ func init() { } var fileDescriptor_f22ec409a72b7b72 = []byte{ - // 610 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xbd, 0x6e, 0xd4, 0x40, - 0x10, 0x3e, 0x93, 0xbf, 0xcb, 0x5e, 0x08, 0xc2, 0x44, 0x91, 0x13, 0xa4, 0xbb, 0xe3, 0x68, 0x4e, - 0x42, 0xd8, 0xba, 0xa4, 0x22, 0x5d, 0x72, 0x12, 0x84, 0x02, 0x11, 0x5d, 0x42, 0x43, 0x81, 0xb5, - 0x5e, 0x4f, 0xec, 0x55, 0xec, 0x1d, 0x6b, 0x77, 0x6d, 0xc8, 0x13, 0x40, 0x99, 0x92, 0x32, 0xaf, - 0xc0, 0x5b, 0xa4, 0x4c, 0x49, 0x05, 0x28, 0x79, 0x11, 0xe4, 0x5d, 0x3b, 0x09, 0x12, 0x0d, 0x0d, - 0xdd, 0xcc, 0x37, 0xdf, 0x67, 0xcf, 0xcc, 0xb7, 0xbb, 0x64, 0x8b, 0x0b, 0x0d, 0x92, 0xa5, 0x94, - 0x8b, 0x50, 0x01, 0x2b, 0x25, 0xd7, 0xa7, 0x01, 0x63, 0x55, 0x50, 0x48, 0xac, 0x78, 0x0c, 0x32, - 0xa8, 0x26, 0x37, 0xb1, 0x5f, 0x48, 0xd4, 0xe8, 0x3e, 0xfd, 0x8b, 0xc6, 0x67, 0xac, 0xf2, 0x6f, - 0x78, 0xd5, 0x64, 0x73, 0x2d, 0xc1, 0x04, 0x0d, 0x3f, 0xa8, 0x23, 0x2b, 0xdd, 0x1c, 0x24, 0x88, - 0x49, 0x06, 0x81, 0xc9, 0xa2, 0xf2, 0x38, 0xd0, 0x3c, 0x07, 0xa5, 0x69, 0x5e, 0xb4, 0x04, 0x1e, - 0xb1, 0x80, 0xa1, 0x84, 0x80, 0x65, 0x1c, 0x84, 0xae, 0x7f, 0x6f, 0xa3, 0x86, 0x10, 0xd4, 0x84, - 0x8c, 0x27, 0xa9, 0xb6, 0xb0, 0x0a, 0x34, 0x88, 0x18, 0x64, 0xce, 0x2d, 0xf9, 0x36, 0xb3, 0x82, - 0xd1, 0xe7, 0x39, 0xf2, 0x78, 0x2a, 0x81, 0x6a, 0x98, 0xa2, 0x50, 0x65, 0x0e, 0x72, 0x5a, 0x77, - 0x7e, 0x20, 0xb1, 0x40, 0x45, 0x33, 0x77, 0x8d, 0x2c, 0x68, 0xae, 0x33, 0xf0, 0x9c, 0xa1, 0x33, - 0x5e, 0x9e, 0xd9, 0xc4, 0x1d, 0x92, 0x5e, 0x0c, 0x8a, 0x49, 0x5e, 0x68, 0x8e, 0xc2, 0xbb, 0x67, - 0x6a, 0x77, 0x21, 0x77, 0x83, 0x74, 0xed, 0x0a, 0x78, 0xec, 0xcd, 0x99, 0xf2, 0x92, 0xc9, 0x5f, - 0xc7, 0xee, 0x2b, 0xb2, 0xca, 0x05, 0xd7, 0x9c, 0x66, 0x61, 0x0a, 0x75, 0xab, 0xde, 0xfc, 0xd0, - 
0x19, 0xf7, 0xb6, 0x36, 0x7d, 0x1e, 0x31, 0xbf, 0x9e, 0xce, 0x6f, 0x66, 0xaa, 0x26, 0xfe, 0xbe, - 0x61, 0xec, 0xcd, 0x5f, 0xfc, 0x18, 0x74, 0x66, 0xf7, 0x1b, 0x9d, 0x05, 0xdd, 0x27, 0x64, 0x25, - 0x01, 0x01, 0x8a, 0xab, 0x30, 0xa5, 0x2a, 0xf5, 0x16, 0x86, 0xce, 0x78, 0x65, 0xd6, 0x6b, 0xb0, - 0x7d, 0xaa, 0x52, 0x77, 0x40, 0x7a, 0x11, 0x17, 0x54, 0x9e, 0x5a, 0xc6, 0xa2, 0x61, 0x10, 0x0b, - 0x19, 0xc2, 0x94, 0x10, 0x55, 0xd0, 0x8f, 0x22, 0xac, 0x57, 0xed, 0x2d, 0x35, 0x8d, 0x58, 0x1f, - 0xfc, 0xd6, 0x07, 0xff, 0xa8, 0xf5, 0x61, 0xaf, 0x5b, 0x37, 0x72, 0xf6, 0x73, 0xe0, 0xcc, 0x96, - 0x8d, 0xae, 0xae, 0xb8, 0x2f, 0xc8, 0x46, 0x86, 0xec, 0x24, 0x2c, 0x45, 0x84, 0x22, 0xe6, 0x22, - 0x09, 0xd1, 0x7e, 0x10, 0x4b, 0xed, 0x75, 0x87, 0xce, 0xb8, 0x3b, 0x5b, 0xaf, 0x09, 0xef, 0xda, - 0xfa, 0x5b, 0xa3, 0xc3, 0x52, 0xef, 0x74, 0xbf, 0x9c, 0x0f, 0x3a, 0x5f, 0xcf, 0x07, 0x9d, 0xd1, - 0x37, 0x87, 0x6c, 0x1c, 0x6a, 0x2c, 0xfe, 0x9b, 0x0f, 0xbb, 0x64, 0x59, 0x69, 0x2c, 0xec, 0xe4, - 0xf3, 0xff, 0x30, 0x79, 0xb7, 0x96, 0xd5, 0x85, 0xd1, 0x07, 0xb2, 0x78, 0x40, 0x25, 0xcd, 0x95, - 0x7b, 0x44, 0x1e, 0x68, 0xc8, 0x8b, 0x8c, 0x6a, 0x08, 0xad, 0x7b, 0xa6, 0xd3, 0xde, 0xd6, 0x33, - 0xe3, 0xea, 0xdd, 0x23, 0xe9, 0xdf, 0x39, 0x84, 0xd5, 0xc4, 0x9f, 0x1a, 0xf4, 0x50, 0x53, 0x0d, - 0xb3, 0xd5, 0xf6, 0x1b, 0x16, 0x1c, 0x45, 0xe4, 0xe1, 0x3e, 0x15, 0xb1, 0x4a, 0xe9, 0x09, 0xbc, - 0x01, 0x4d, 0x63, 0xaa, 0xa9, 0xbb, 0x4d, 0xd6, 0xdb, 0xab, 0x14, 0x1e, 0x03, 0x84, 0x05, 0x62, - 0x16, 0xd2, 0x38, 0x96, 0xcd, 0x6e, 0x1e, 0xb5, 0xd5, 0x97, 0x00, 0x07, 0x88, 0xd9, 0x6e, 0x1c, - 0x4b, 0xd7, 0x23, 0x4b, 0x15, 0x48, 0x75, 0xbb, 0xa5, 0x36, 0xdd, 0x3b, 0xba, 0xb8, 0xea, 0x3b, - 0x97, 0x57, 0x7d, 0xe7, 0xd7, 0x55, 0xdf, 0x39, 0xbb, 0xee, 0x77, 0x2e, 0xaf, 0xfb, 0x9d, 0xef, - 0xd7, 0xfd, 0xce, 0xfb, 0x9d, 0x84, 0xeb, 0xb4, 0x8c, 0x7c, 0x86, 0x79, 0xc0, 0x50, 0xe5, 0xa8, - 0x82, 0xdb, 0xbb, 0xfd, 0xfc, 0xe6, 0x3d, 0xf8, 0xf4, 0xe7, 0x8b, 0xa0, 0x4f, 0x0b, 0x50, 0xd1, - 0xa2, 0xd9, 0xe0, 0xf6, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc5, 0x6c, 0x6d, 0x4d, 0x42, 0x04, - 0x00, 0x00, + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x54, 0xbf, 0x6f, 0xd4, 0x30, + 0x14, 0xbe, 0xd0, 0x5f, 0x77, 0xbe, 0x52, 0x44, 0xa8, 0x4a, 0x5a, 0xa1, 0xbb, 0xe3, 0x58, 0x0e, + 0x21, 0x12, 0x5d, 0x3b, 0xd1, 0xed, 0x5a, 0x09, 0xca, 0x80, 0xa8, 0xd2, 0xb2, 0x30, 0x10, 0x39, + 0xf6, 0x6b, 0x62, 0x35, 0xb1, 0x23, 0xdb, 0x09, 0x74, 0x67, 0x60, 0xec, 0xc8, 0xd8, 0xff, 0x80, + 0x7f, 0xa3, 0x63, 0x47, 0x26, 0x40, 0xed, 0x3f, 0x82, 0x62, 0x5f, 0x7a, 0x87, 0xc4, 0xc2, 0xc0, + 0xe6, 0xf7, 0xbd, 0xef, 0x73, 0xde, 0x7b, 0x9f, 0xf3, 0xd0, 0x36, 0xe3, 0x1a, 0x24, 0x49, 0x31, + 0xe3, 0x91, 0x02, 0x52, 0x4a, 0xa6, 0xcf, 0x02, 0x42, 0xaa, 0xa0, 0x90, 0xa2, 0x62, 0x14, 0x64, + 0x50, 0x8d, 0x6f, 0xcf, 0x7e, 0x21, 0x85, 0x16, 0xee, 0x93, 0xbf, 0x68, 0x7c, 0x42, 0x2a, 0xff, + 0x96, 0x57, 0x8d, 0xb7, 0xd6, 0x13, 0x91, 0x08, 0xc3, 0x0f, 0xea, 0x93, 0x95, 0x6e, 0xf5, 0x13, + 0x21, 0x92, 0x0c, 0x02, 0x13, 0xc5, 0xe5, 0x49, 0xa0, 0x59, 0x0e, 0x4a, 0xe3, 0xbc, 0x68, 0x08, + 0x2c, 0x26, 0x01, 0x11, 0x12, 0x02, 0x92, 0x31, 0xe0, 0xba, 0xfe, 0xbc, 0x3d, 0x4d, 0x09, 0x41, + 0x4d, 0xc8, 0x58, 0x92, 0x6a, 0x0b, 0xab, 0x40, 0x03, 0xa7, 0x20, 0x73, 0x66, 0xc9, 0xb3, 0xc8, + 0x0a, 0x86, 0x9f, 0x17, 0x90, 0xb7, 0x2f, 0xb8, 0x2a, 0x73, 0x90, 0x13, 0x4a, 0x99, 0x66, 0x82, + 0x1f, 0x4a, 0x51, 0x08, 0x85, 0x33, 0x77, 0x1d, 0x2d, 0x69, 0xa6, 0x33, 0xf0, 0x9c, 0x81, 0x33, + 0xea, 0x84, 0x36, 0x70, 0x07, 0xa8, 0x4b, 0x41, 0x11, 0xc9, 
0x8a, 0x9a, 0xec, 0xdd, 0x31, 0xb9, + 0x79, 0xc8, 0xdd, 0x44, 0x6d, 0xdb, 0x3f, 0xa3, 0xde, 0x82, 0x49, 0xaf, 0x98, 0xf8, 0x35, 0x75, + 0x5f, 0xa1, 0x35, 0xc6, 0x99, 0x66, 0x38, 0x8b, 0x52, 0xa8, 0xeb, 0xf4, 0x16, 0x07, 0xce, 0xa8, + 0xbb, 0xbd, 0xe5, 0xb3, 0x98, 0xf8, 0x75, 0x6b, 0xfe, 0xb4, 0xa1, 0x6a, 0xec, 0x1f, 0x18, 0xc6, + 0xde, 0xe2, 0xe5, 0x8f, 0x7e, 0x2b, 0xbc, 0x3b, 0xd5, 0x59, 0xd0, 0x7d, 0x8c, 0x56, 0x13, 0xe0, + 0xa0, 0x98, 0x8a, 0x52, 0xac, 0x52, 0x6f, 0x69, 0xe0, 0x8c, 0x56, 0xc3, 0xee, 0x14, 0x3b, 0xc0, + 0x2a, 0x75, 0xfb, 0xa8, 0x1b, 0x33, 0x8e, 0xe5, 0x99, 0x65, 0x2c, 0x1b, 0x06, 0xb2, 0x90, 0x21, + 0xec, 0x23, 0xa4, 0x0a, 0xfc, 0x91, 0x47, 0xf5, 0x9c, 0xbd, 0x95, 0x69, 0x21, 0xd6, 0x04, 0xbf, + 0x31, 0xc1, 0x3f, 0x6e, 0x4c, 0xd8, 0x6b, 0xd7, 0x85, 0x9c, 0xff, 0xec, 0x3b, 0x61, 0xc7, 0xe8, + 0xea, 0x8c, 0xfb, 0x02, 0x6d, 0x66, 0x82, 0x9c, 0x46, 0x25, 0x8f, 0x05, 0xa7, 0x8c, 0x27, 0x91, + 0xb0, 0x17, 0x8a, 0x52, 0x7b, 0xed, 0x81, 0x33, 0x6a, 0x87, 0x1b, 0x35, 0xe1, 0x5d, 0x93, 0x7f, + 0x6b, 0x74, 0xa2, 0xd4, 0xbb, 0xed, 0x2f, 0x17, 0xfd, 0xd6, 0xd7, 0x8b, 0x7e, 0x6b, 0xf8, 0xcd, + 0x41, 0x0f, 0x1b, 0x1b, 0x42, 0xc8, 0x45, 0x85, 0xb3, 0xff, 0xe9, 0xc2, 0x04, 0x75, 0x94, 0x16, + 0x85, 0xed, 0x7b, 0xf1, 0x1f, 0xfa, 0x6e, 0xd7, 0xb2, 0x3a, 0x31, 0xfc, 0x80, 0x96, 0x0f, 0xb1, + 0xc4, 0xb9, 0x72, 0x8f, 0xd1, 0x3d, 0x0d, 0x79, 0x91, 0x61, 0x0d, 0x91, 0xf5, 0xce, 0x54, 0xda, + 0xdd, 0x7e, 0x66, 0x3c, 0x9d, 0x7f, 0x8d, 0xfe, 0xdc, 0xfb, 0xab, 0xc6, 0xfe, 0xbe, 0x41, 0x8f, + 0x34, 0xd6, 0x10, 0xae, 0x35, 0x77, 0x58, 0x70, 0x18, 0xa3, 0xfb, 0x07, 0x98, 0x53, 0x95, 0xe2, + 0x53, 0x78, 0x03, 0x1a, 0x53, 0xac, 0xb1, 0xbb, 0x83, 0x36, 0x9a, 0xbf, 0x28, 0x3a, 0x01, 0x88, + 0x0a, 0x21, 0xb2, 0x08, 0x53, 0x2a, 0xa7, 0xb3, 0x79, 0xd0, 0x64, 0x5f, 0x02, 0x1c, 0x0a, 0x91, + 0x4d, 0x28, 0x95, 0xae, 0x87, 0x56, 0x2a, 0x90, 0x6a, 0x36, 0xa5, 0x26, 0x1c, 0x3e, 0x45, 0x9d, + 0xa3, 0x0c, 0xab, 0x74, 0x42, 0x4e, 0x95, 0xfb, 0x08, 0x75, 0xea, 0x9b, 0x40, 0x29, 0x50, 0x9e, + 0x33, 0x58, 0x18, 0x75, 0xc2, 0x19, 0xb0, 0x77, 0x7c, 0x79, 0xdd, 0x73, 0xae, 0xae, 0x7b, 0xce, + 0xaf, 0xeb, 0x9e, 0x73, 0x7e, 0xd3, 0x6b, 0x5d, 0xdd, 0xf4, 0x5a, 0xdf, 0x6f, 0x7a, 0xad, 0xf7, + 0xbb, 0x09, 0xd3, 0x69, 0x19, 0xfb, 0x44, 0xe4, 0x01, 0x11, 0x2a, 0x17, 0x2a, 0x98, 0x6d, 0x80, + 0xe7, 0xb7, 0x5b, 0xe3, 0xd3, 0x9f, 0x7b, 0x43, 0x9f, 0x15, 0xa0, 0xe2, 0x65, 0x33, 0xec, 0x9d, + 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xad, 0x37, 0xb8, 0x76, 0x68, 0x04, 0x00, 0x00, } -func (m *CreateConsumerChainProposal) Marshal() (dAtA []byte, err error) { +func (m *ConsumerAdditionProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -324,12 +372,12 @@ func (m *CreateConsumerChainProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CreateConsumerChainProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *ConsumerAdditionProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *CreateConsumerChainProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ConsumerAdditionProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -400,7 +448,7 @@ func (m *CreateConsumerChainProposal) MarshalToSizedBuffer(dAtA []byte) (int, er return len(dAtA) - i, nil } -func (m *StopConsumerChainProposal) Marshal() (dAtA []byte, err error) { +func (m *ConsumerRemovalProposal) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -410,12 +458,12 @@ func (m *StopConsumerChainProposal) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StopConsumerChainProposal) MarshalTo(dAtA []byte) (int, error) { +func (m *ConsumerRemovalProposal) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StopConsumerChainProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *ConsumerRemovalProposal) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -524,6 +572,38 @@ func (m *HandshakeMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SlashAcks) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SlashAcks) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SlashAcks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Addresses[iNdEx]) + copy(dAtA[i:], m.Addresses[iNdEx]) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Addresses[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { offset -= sovProvider(v) base := offset @@ -535,7 +615,7 @@ func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *CreateConsumerChainProposal) Size() (n int) { +func (m *ConsumerAdditionProposal) Size() (n int) { if m == nil { return 0 } @@ -571,7 +651,7 @@ func (m *CreateConsumerChainProposal) Size() (n int) { return n } -func (m *StopConsumerChainProposal) Size() (n int) { +func (m *ConsumerRemovalProposal) Size() (n int) { if m == nil { return 0 } @@ -624,13 +704,28 @@ func (m *HandshakeMetadata) Size() (n int) { return n } +func (m *SlashAcks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, s := range m.Addresses { + l = len(s) + n += 1 + l + sovProvider(uint64(l)) + } + } + return n +} + func sovProvider(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozProvider(x uint64) (n int) { return sovProvider(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *CreateConsumerChainProposal) Unmarshal(dAtA []byte) error { +func (m *ConsumerAdditionProposal) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -653,10 +748,10 @@ func (m *CreateConsumerChainProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CreateConsumerChainProposal: wiretype end group for non-group") + return fmt.Errorf("proto: ConsumerAdditionProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CreateConsumerChainProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConsumerAdditionProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -930,7 +1025,7 @@ func (m *CreateConsumerChainProposal) Unmarshal(dAtA []byte) error { } return nil } -func (m *StopConsumerChainProposal) Unmarshal(dAtA []byte) error { +func (m *ConsumerRemovalProposal) Unmarshal(dAtA []byte) 
error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -953,10 +1048,10 @@ func (m *StopConsumerChainProposal) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StopConsumerChainProposal: wiretype end group for non-group") + return fmt.Errorf("proto: ConsumerRemovalProposal: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StopConsumerChainProposal: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ConsumerRemovalProposal: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1309,6 +1404,88 @@ func (m *HandshakeMetadata) Unmarshal(dAtA []byte) error { } return nil } +func (m *SlashAcks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SlashAcks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SlashAcks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipProvider(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/ccv/provider/types/query.pb.go b/x/ccv/provider/types/query.pb.go index 2bba6e200b..0bf62a2b99 100644 --- a/x/ccv/provider/types/query.pb.go +++ b/x/ccv/provider/types/query.pb.go @@ -166,8 +166,8 @@ const _ = grpc.SupportPackageIsVersion4 // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type QueryClient interface { - // ConsumerGenesis queries the genesis state needed to start a consumer chain whose proposal - // has been accepted + // ConsumerGenesis queries the genesis state needed to start a consumer chain + // whose proposal has been accepted ConsumerGenesis(ctx context.Context, in *QueryConsumerGenesisRequest, opts ...grpc.CallOption) (*QueryConsumerGenesisResponse, error) } @@ -190,8 +190,8 @@ func (c *queryClient) ConsumerGenesis(ctx context.Context, in *QueryConsumerGene // QueryServer is the server API for Query service. 
type QueryServer interface { - // ConsumerGenesis queries the genesis state needed to start a consumer chain whose proposal - // has been accepted + // ConsumerGenesis queries the genesis state needed to start a consumer chain + // whose proposal has been accepted ConsumerGenesis(context.Context, *QueryConsumerGenesisRequest) (*QueryConsumerGenesisResponse, error) } diff --git a/x/ccv/types/ccv.pb.go b/x/ccv/types/ccv.pb.go index d6a17438a0..f5ae919aa0 100644 --- a/x/ccv/types/ccv.pb.go +++ b/x/ccv/types/ccv.pb.go @@ -25,10 +25,11 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -// This packet is sent from provider chain to consumer chain if the validator set for consumer chain -// changes (due to new bonding/unbonding messages or slashing events) -// A VSCMatured packet from consumer chain will be sent asynchronously once unbonding period is over, -// and this will function as `UnbondingOver` message for this packet. +// This packet is sent from provider chain to consumer chain if the validator +// set for consumer chain changes (due to new bonding/unbonding messages or +// slashing events) A VSCMatured packet from consumer chain will be sent +// asynchronously once unbonding period is over, and this will function as +// `UnbondingOver` message for this packet. type ValidatorSetChangePacketData struct { ValidatorUpdates []types.ValidatorUpdate `protobuf:"bytes,1,rep,name=validator_updates,json=validatorUpdates,proto3" json:"validator_updates" yaml:"validator_updates"` ValsetUpdateId uint64 `protobuf:"varint,2,opt,name=valset_update_id,json=valsetUpdateId,proto3" json:"valset_update_id,omitempty"` @@ -256,11 +257,103 @@ func (m *SlashPacketData) GetInfraction() types1.InfractionType { return types1.InfractionEmpty } +// UnbondingOpsIndex defines a list of unbonding operation ids. +type UnbondingOpsIndex struct { + Ids []uint64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"` +} + +func (m *UnbondingOpsIndex) Reset() { *m = UnbondingOpsIndex{} } +func (m *UnbondingOpsIndex) String() string { return proto.CompactTextString(m) } +func (*UnbondingOpsIndex) ProtoMessage() {} +func (*UnbondingOpsIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_68bd5f3242e6f29c, []int{4} +} +func (m *UnbondingOpsIndex) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UnbondingOpsIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_UnbondingOpsIndex.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *UnbondingOpsIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_UnbondingOpsIndex.Merge(m, src) +} +func (m *UnbondingOpsIndex) XXX_Size() int { + return m.Size() +} +func (m *UnbondingOpsIndex) XXX_DiscardUnknown() { + xxx_messageInfo_UnbondingOpsIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_UnbondingOpsIndex proto.InternalMessageInfo + +func (m *UnbondingOpsIndex) GetIds() []uint64 { + if m != nil { + return m.Ids + } + return nil +} + +// MaturedUnbondingOps defines a list of ids corresponding to ids of matured unbonding operations. 
+type MaturedUnbondingOps struct { + Ids []uint64 `protobuf:"varint,1,rep,packed,name=ids,proto3" json:"ids,omitempty"` +} + +func (m *MaturedUnbondingOps) Reset() { *m = MaturedUnbondingOps{} } +func (m *MaturedUnbondingOps) String() string { return proto.CompactTextString(m) } +func (*MaturedUnbondingOps) ProtoMessage() {} +func (*MaturedUnbondingOps) Descriptor() ([]byte, []int) { + return fileDescriptor_68bd5f3242e6f29c, []int{5} +} +func (m *MaturedUnbondingOps) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MaturedUnbondingOps) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MaturedUnbondingOps.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MaturedUnbondingOps) XXX_Merge(src proto.Message) { + xxx_messageInfo_MaturedUnbondingOps.Merge(m, src) +} +func (m *MaturedUnbondingOps) XXX_Size() int { + return m.Size() +} +func (m *MaturedUnbondingOps) XXX_DiscardUnknown() { + xxx_messageInfo_MaturedUnbondingOps.DiscardUnknown(m) +} + +var xxx_messageInfo_MaturedUnbondingOps proto.InternalMessageInfo + +func (m *MaturedUnbondingOps) GetIds() []uint64 { + if m != nil { + return m.Ids + } + return nil +} + func init() { proto.RegisterType((*ValidatorSetChangePacketData)(nil), "interchain_security.ccv.v1.ValidatorSetChangePacketData") proto.RegisterType((*UnbondingOp)(nil), "interchain_security.ccv.v1.UnbondingOp") proto.RegisterType((*VSCMaturedPacketData)(nil), "interchain_security.ccv.v1.VSCMaturedPacketData") proto.RegisterType((*SlashPacketData)(nil), "interchain_security.ccv.v1.SlashPacketData") + proto.RegisterType((*UnbondingOpsIndex)(nil), "interchain_security.ccv.v1.UnbondingOpsIndex") + proto.RegisterType((*MaturedUnbondingOps)(nil), "interchain_security.ccv.v1.MaturedUnbondingOps") } func init() { @@ -268,37 +361,39 @@ func init() { } var fileDescriptor_68bd5f3242e6f29c = []byte{ - // 476 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x8a, 0xd3, 0x40, - 0x18, 0xef, 0xb4, 0x22, 0x74, 0x0a, 0x75, 0x0d, 0x0b, 0xc6, 0xaa, 0xd9, 0x10, 0x16, 0xc9, 0xc5, - 0x84, 0xd4, 0xdb, 0x9e, 0xb4, 0x15, 0x61, 0x11, 0x51, 0x52, 0x77, 0x41, 0x2f, 0x61, 0x32, 0x33, - 0xa6, 0x43, 0x9b, 0x99, 0x90, 0x99, 0x04, 0xfb, 0x16, 0x3e, 0xd6, 0x1e, 0xf7, 0xe6, 0x9e, 0x16, - 0x69, 0xdf, 0xc0, 0x27, 0x90, 0x4c, 0xd2, 0xb4, 0x6a, 0x3d, 0xec, 0x29, 0x93, 0xef, 0xf7, 0x87, - 0x8f, 0x1f, 0xbf, 0x0f, 0x9e, 0x32, 0xae, 0x68, 0x8e, 0xe7, 0x88, 0xf1, 0x48, 0x52, 0x5c, 0xe4, - 0x4c, 0xad, 0x7c, 0x8c, 0x4b, 0xbf, 0x0c, 0xaa, 0x8f, 0x97, 0xe5, 0x42, 0x09, 0x63, 0x74, 0x80, - 0xe5, 0x55, 0x70, 0x19, 0x8c, 0x4e, 0xb1, 0x90, 0xa9, 0x90, 0xbe, 0x54, 0x68, 0xc1, 0x78, 0xe2, - 0x97, 0x41, 0x4c, 0x15, 0x0a, 0xb6, 0xff, 0xb5, 0xc3, 0xe8, 0x38, 0x11, 0x89, 0xd0, 0x4f, 0xbf, - 0x7a, 0x35, 0xd3, 0x27, 0x8a, 0x72, 0x42, 0xf3, 0x94, 0x71, 0xe5, 0xa3, 0x18, 0x33, 0x5f, 0xad, - 0x32, 0x2a, 0x6b, 0xd0, 0xb9, 0x01, 0xf0, 0xe9, 0x25, 0x5a, 0x32, 0x82, 0x94, 0xc8, 0x67, 0x54, - 0x4d, 0xe7, 0x88, 0x27, 0xf4, 0x23, 0xc2, 0x0b, 0xaa, 0xde, 0x20, 0x85, 0x0c, 0x01, 0x1f, 0x96, - 0x5b, 0x3c, 0x2a, 0x32, 0x82, 0x14, 0x95, 0x26, 0xb0, 0x7b, 0xee, 0x60, 0x6c, 0x7b, 0x3b, 0x67, - 0xaf, 0x72, 0xf6, 0x5a, 0xa7, 0x0b, 0x4d, 0x9c, 0xd8, 0x57, 0xb7, 0x27, 0x9d, 0x5f, 0xb7, 0x27, - 0xe6, 0x0a, 0xa5, 0xcb, 0x33, 0xe7, 0x1f, 0x23, 0x27, 0x3c, 0x2a, 0xff, 0x94, 0x48, 0xc3, 0x85, - 0xd5, 0x4c, 
0x52, 0xd5, 0x90, 0x22, 0x46, 0xcc, 0xae, 0x0d, 0xdc, 0x7b, 0xe1, 0xb0, 0x9e, 0xd7, - 0xc4, 0x73, 0x62, 0x3c, 0x83, 0x50, 0x2e, 0x91, 0x9c, 0x47, 0x08, 0x2f, 0xa4, 0xd9, 0xb3, 0x7b, - 0x6e, 0x3f, 0xec, 0xeb, 0xc9, 0x6b, 0xbc, 0x90, 0xce, 0x67, 0x38, 0xb8, 0xe0, 0xb1, 0xe0, 0x84, - 0xf1, 0xe4, 0x43, 0x66, 0x0c, 0x61, 0x97, 0x11, 0x13, 0x68, 0xa7, 0x2e, 0x23, 0xc6, 0x19, 0x7c, - 0x5c, 0x6c, 0xe1, 0x08, 0x0b, 0x2e, 0x8b, 0x94, 0xe6, 0x91, 0x8e, 0x5f, 0x9a, 0x5d, 0x6d, 0xf6, - 0xa8, 0x25, 0x4c, 0x1b, 0x7c, 0xaa, 0x61, 0xe7, 0x15, 0x3c, 0xbe, 0x9c, 0x4d, 0xdf, 0x23, 0x55, - 0xe4, 0x94, 0xec, 0x85, 0x75, 0x68, 0x77, 0x70, 0x68, 0x77, 0xe7, 0x07, 0x80, 0x0f, 0x66, 0xd5, - 0xaa, 0x7b, 0xea, 0x10, 0xf6, 0xdb, 0x34, 0xb4, 0x6c, 0x30, 0x1e, 0xfd, 0x3f, 0xe2, 0x89, 0xd9, - 0x84, 0x7b, 0xf4, 0x57, 0xb8, 0x4e, 0xb8, 0xb3, 0xb9, 0x43, 0x9a, 0x6f, 0x21, 0x64, 0xfc, 0x6b, - 0x8e, 0xb0, 0x62, 0x82, 0x9b, 0x3d, 0x1b, 0xb8, 0xc3, 0xf1, 0x73, 0xaf, 0xee, 0x9d, 0xb7, 0xed, - 0x59, 0xd3, 0x3b, 0xef, 0xbc, 0x65, 0x7e, 0x5a, 0x65, 0x34, 0xdc, 0x53, 0x4e, 0xde, 0x5d, 0xad, - 0x2d, 0x70, 0xbd, 0xb6, 0xc0, 0xcf, 0xb5, 0x05, 0xbe, 0x6f, 0xac, 0xce, 0xf5, 0xc6, 0xea, 0xdc, - 0x6c, 0xac, 0xce, 0x97, 0x20, 0x61, 0x6a, 0x5e, 0xc4, 0x1e, 0x16, 0xa9, 0xdf, 0xf4, 0x79, 0x57, - 0xf9, 0x17, 0xed, 0x61, 0x7c, 0xd3, 0xa7, 0xa1, 0x4b, 0x1a, 0xdf, 0xd7, 0x2d, 0x7d, 0xf9, 0x3b, - 0x00, 0x00, 0xff, 0xff, 0x47, 0xac, 0x22, 0x45, 0x42, 0x03, 0x00, 0x00, + // 509 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xee, 0xb4, 0x8b, 0xb0, 0x53, 0xa8, 0xdd, 0xb8, 0x60, 0xac, 0x9a, 0x0d, 0x61, 0xd5, 0x5e, + 0x4c, 0x68, 0xbd, 0xed, 0x49, 0x5b, 0x11, 0x8a, 0x88, 0x92, 0xba, 0x0b, 0x7a, 0x09, 0xd3, 0x99, + 0x31, 0x1d, 0xda, 0xcc, 0x84, 0xcc, 0x24, 0x6c, 0xff, 0x85, 0x3f, 0x6b, 0x8f, 0x7b, 0x73, 0x4f, + 0x8b, 0xb4, 0xff, 0xc0, 0x5f, 0x20, 0x99, 0xa4, 0x69, 0x74, 0xeb, 0xc1, 0xd3, 0xbc, 0x79, 0xdf, + 0xf7, 0x3e, 0x1e, 0x1f, 0xdf, 0x83, 0xa7, 0x8c, 0x2b, 0x9a, 0xe0, 0x39, 0x62, 0x3c, 0x90, 0x14, + 0xa7, 0x09, 0x53, 0x2b, 0x0f, 0xe3, 0xcc, 0xcb, 0x06, 0xf9, 0xe3, 0xc6, 0x89, 0x50, 0xc2, 0xe8, + 0xed, 0x61, 0xb9, 0x39, 0x9c, 0x0d, 0x7a, 0xa7, 0x58, 0xc8, 0x48, 0x48, 0x4f, 0x2a, 0xb4, 0x60, + 0x3c, 0xf4, 0xb2, 0xc1, 0x8c, 0x2a, 0x34, 0xd8, 0xfe, 0x0b, 0x85, 0xde, 0x71, 0x28, 0x42, 0xa1, + 0x4b, 0x2f, 0xaf, 0xca, 0xee, 0x63, 0x45, 0x39, 0xa1, 0x49, 0xc4, 0xb8, 0xf2, 0xd0, 0x0c, 0x33, + 0x4f, 0xad, 0x62, 0x2a, 0x0b, 0xd0, 0xb9, 0x01, 0xf0, 0xc9, 0x05, 0x5a, 0x32, 0x82, 0x94, 0x48, + 0xa6, 0x54, 0x8d, 0xe7, 0x88, 0x87, 0xf4, 0x13, 0xc2, 0x0b, 0xaa, 0xde, 0x22, 0x85, 0x0c, 0x01, + 0x8f, 0xb2, 0x2d, 0x1e, 0xa4, 0x31, 0x41, 0x8a, 0x4a, 0x13, 0xd8, 0xad, 0x7e, 0x7b, 0x68, 0xbb, + 0x3b, 0x65, 0x37, 0x57, 0x76, 0x2b, 0xa5, 0x73, 0x4d, 0x1c, 0xd9, 0x57, 0xb7, 0x27, 0x8d, 0x5f, + 0xb7, 0x27, 0xe6, 0x0a, 0x45, 0xcb, 0x33, 0xe7, 0x8e, 0x90, 0xe3, 0x77, 0xb3, 0x3f, 0x47, 0xa4, + 0xd1, 0x87, 0x79, 0x4f, 0x52, 0x55, 0x92, 0x02, 0x46, 0xcc, 0xa6, 0x0d, 0xfa, 0x07, 0x7e, 0xa7, + 0xe8, 0x17, 0xc4, 0x09, 0x31, 0x9e, 0x42, 0x28, 0x97, 0x48, 0xce, 0x03, 0x84, 0x17, 0xd2, 0x6c, + 0xd9, 0xad, 0xfe, 0xa1, 0x7f, 0xa8, 0x3b, 0x6f, 0xf0, 0x42, 0x3a, 0x5f, 0x60, 0xfb, 0x9c, 0xcf, + 0x04, 0x27, 0x8c, 0x87, 0x1f, 0x63, 0xa3, 0x03, 0x9b, 0x8c, 0x98, 0x40, 0x2b, 0x35, 0x19, 0x31, + 0xce, 0xe0, 0xa3, 0x74, 0x0b, 0x07, 0x58, 0x70, 0x99, 0x46, 0x34, 0x09, 0xb4, 0xfd, 0xd2, 0x6c, + 0x6a, 0xb1, 0x87, 0x15, 0x61, 0x5c, 0xe2, 0x63, 0x0d, 0x3b, 0xaf, 0xe1, 0xf1, 0xc5, 0x74, 0xfc, + 0x01, 0xa9, 
0x34, 0xa1, 0xa4, 0x66, 0xd6, 0xbe, 0xdd, 0xc1, 0xbe, 0xdd, 0x9d, 0x1f, 0x00, 0xde, + 0x9f, 0xe6, 0xab, 0xd6, 0xa6, 0x7d, 0x78, 0x58, 0xb9, 0xa1, 0xc7, 0xda, 0xc3, 0xde, 0xbf, 0x2d, + 0x1e, 0x99, 0xa5, 0xb9, 0xdd, 0xbf, 0xcc, 0x75, 0xfc, 0x9d, 0xcc, 0x7f, 0xb8, 0xf9, 0x0e, 0x42, + 0xc6, 0xbf, 0x25, 0x08, 0x2b, 0x26, 0xb8, 0xd9, 0xb2, 0x41, 0xbf, 0x33, 0x7c, 0xee, 0x16, 0xb9, + 0x73, 0xb7, 0x39, 0x2b, 0x73, 0xe7, 0x4e, 0x2a, 0xe6, 0xe7, 0x55, 0x4c, 0xfd, 0xda, 0xa4, 0xf3, + 0x0c, 0x1e, 0xd5, 0x6c, 0x97, 0x13, 0x4e, 0xe8, 0xa5, 0xd1, 0x85, 0x2d, 0x46, 0x8a, 0xdc, 0x1c, + 0xf8, 0x79, 0xe9, 0xbc, 0x80, 0x0f, 0x4a, 0xff, 0xea, 0xec, 0xbb, 0xc4, 0xd1, 0xfb, 0xab, 0xb5, + 0x05, 0xae, 0xd7, 0x16, 0xf8, 0xb9, 0xb6, 0xc0, 0xf7, 0x8d, 0xd5, 0xb8, 0xde, 0x58, 0x8d, 0x9b, + 0x8d, 0xd5, 0xf8, 0x3a, 0x08, 0x99, 0x9a, 0xa7, 0x33, 0x17, 0x8b, 0xc8, 0x2b, 0xef, 0x63, 0x77, + 0x42, 0x2f, 0xab, 0x43, 0xbb, 0xd4, 0xa7, 0xa6, 0x43, 0x3f, 0xbb, 0xa7, 0x53, 0xff, 0xea, 0x77, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xaa, 0xb0, 0x21, 0x92, 0x03, 0x00, 0x00, } func (m *ValidatorSetChangePacketData) Marshal() (dAtA []byte, err error) { @@ -460,6 +555,88 @@ func (m *SlashPacketData) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *UnbondingOpsIndex) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UnbondingOpsIndex) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UnbondingOpsIndex) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ids) > 0 { + dAtA3 := make([]byte, len(m.Ids)*10) + var j2 int + for _, num := range m.Ids { + for num >= 1<<7 { + dAtA3[j2] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j2++ + } + dAtA3[j2] = uint8(num) + j2++ + } + i -= j2 + copy(dAtA[i:], dAtA3[:j2]) + i = encodeVarintCcv(dAtA, i, uint64(j2)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MaturedUnbondingOps) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MaturedUnbondingOps) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MaturedUnbondingOps) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ids) > 0 { + dAtA5 := make([]byte, len(m.Ids)*10) + var j4 int + for _, num := range m.Ids { + for num >= 1<<7 { + dAtA5[j4] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j4++ + } + dAtA5[j4] = uint8(num) + j4++ + } + i -= j4 + copy(dAtA[i:], dAtA5[:j4]) + i = encodeVarintCcv(dAtA, i, uint64(j4)) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintCcv(dAtA []byte, offset int, v uint64) int { offset -= sovCcv(v) base := offset @@ -542,6 +719,38 @@ func (m *SlashPacketData) Size() (n int) { return n } +func (m *UnbondingOpsIndex) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ids) > 0 { + l = 0 + for _, e := range m.Ids { + l += sovCcv(uint64(e)) + } + n += 1 + sovCcv(uint64(l)) + l + } + return n +} + +func (m *MaturedUnbondingOps) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ids) > 0 { + l = 0 + for _, e := range m.Ids { + l += 
sovCcv(uint64(e)) + } + n += 1 + sovCcv(uint64(l)) + l + } + return n +} + func sovCcv(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -974,6 +1183,258 @@ func (m *SlashPacketData) Unmarshal(dAtA []byte) error { } return nil } +func (m *UnbondingOpsIndex) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnbondingOpsIndex: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnbondingOpsIndex: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ids = append(m.Ids, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthCcv + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthCcv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ids) == 0 { + m.Ids = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ids = append(m.Ids, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipCcv(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCcv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MaturedUnbondingOps) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MaturedUnbondingOps: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MaturedUnbondingOps: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ids = append(m.Ids, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthCcv + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthCcv + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ids) == 0 { + m.Ids = make([]uint64, 0, elementCount) + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCcv + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ids = append(m.Ids, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ids", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipCcv(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCcv + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipCcv(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/x/ccv/types/errors.go b/x/ccv/types/errors.go index 9af7b8a32a..f20f294f1e 100644 --- a/x/ccv/types/errors.go +++ b/x/ccv/types/errors.go @@ -19,7 +19,7 @@ var ( ErrInvalidVSCMaturedTime = sdkerrors.Register(ModuleName, 12, "invalid maturity time for VSC packet") ErrInvalidConsumerState = sdkerrors.Register(ModuleName, 13, "provider chain has invalid state for consumer chain") ErrInvalidConsumerClient = sdkerrors.Register(ModuleName, 14, "ccv channel is not built on correct client") - ErrInvalidProposal = sdkerrors.Register(ModuleName, 15, "invalid create consumer chain proposal") + ErrInvalidProposal = sdkerrors.Register(ModuleName, 15, "invalid proposal") ErrInvalidHandshakeMetadata = sdkerrors.Register(ModuleName, 16, "invalid provider handshake metadata") ErrChannelNotFound = sdkerrors.Register(ModuleName, 17, "channel not found") ErrClientNotFound = sdkerrors.Register(ModuleName, 18, "client not found")
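For reference, the renamed provider proposal API introduced in this patch (ConsumerAdditionProposal, ConsumerRemovalProposal, and a NewConsumerRemovalProposal constructor that no longer returns an error) might be exercised as in the minimal sketch below. This is illustrative only, not part of the patch: the import paths follow the package layout shown in the diff, while the chain id, titles, hashes, and times are placeholder assumptions.

package main

import (
	"fmt"
	"time"

	clienttypes "github.com/cosmos/ibc-go/v3/modules/core/02-client/types"
	providertypes "github.com/cosmos/interchain-security/x/ccv/provider/types"
)

func main() {
	// Governance content for adding a consumer chain; validators are expected
	// to start validating it at spawn time. All values below are placeholders.
	addition := providertypes.NewConsumerAdditionProposal(
		"add consumer", "onboard chain foo", "foo-1",
		clienttypes.NewHeight(0, 1),
		[]byte("gen_hash"), []byte("bin_hash"),
		time.Now().Add(48*time.Hour),
	)
	if err := addition.ValidateBasic(); err != nil {
		fmt.Println("invalid consumer addition proposal:", err)
	}

	// Governance content for removing (stopping) a consumer chain at stop time.
	// After this change the constructor returns govtypes.Content without an error.
	removal := providertypes.NewConsumerRemovalProposal(
		"remove consumer", "offboard chain foo", "foo-1",
		time.Now().Add(72*time.Hour),
	)
	if err := removal.ValidateBasic(); err != nil {
		fmt.Println("invalid consumer removal proposal:", err)
	}
}

Both constructors return govtypes.Content, so the values can be wrapped in a gov SubmitProposal message in the usual way; ValidateBasic is the same stateless check exercised by TestValidateBasic above.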