diff --git a/chains/tendermint/msg.go b/chains/tendermint/msg.go index 532bb3b3..f412119b 100644 --- a/chains/tendermint/msg.go +++ b/chains/tendermint/msg.go @@ -146,6 +146,13 @@ func parseMsgEventLog(ev abcitypes.Event) (core.MsgEventLog, error) { return nil, err } return &event, nil + case chantypes.EventTypeChannelUpgradeOpen: + var event core.EventUpgradeChannel + var err0, err1, err2 error + event.PortID, err0 = getAttributeString(ev, chantypes.AttributeKeyPortID) + event.ChannelID, err1 = getAttributeString(ev, chantypes.AttributeKeyChannelID) + event.UpgradeSequence, err2 = getAttributeUint64(ev, chantypes.AttributeKeyUpgradeSequence) + return &event, errors.Join(err0, err1, err2) default: return &core.EventUnknown{Value: ev}, nil } diff --git a/chains/tendermint/query.go b/chains/tendermint/query.go index a1f5cc8d..3a8e900d 100644 --- a/chains/tendermint/query.go +++ b/chains/tendermint/query.go @@ -27,6 +27,8 @@ import ( committypes "github.com/cosmos/ibc-go/v8/modules/core/23-commitment/types" ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" "github.com/hyperledger-labs/yui-relayer/core" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // QueryClientState retrevies the latest consensus state for a client in state at a given height @@ -55,12 +57,12 @@ var emptyConnRes = conntypes.NewQueryConnectionResponse( ) // QueryConnection returns the remote end of a given connection -func (c *Chain) QueryConnection(ctx core.QueryContext) (*conntypes.QueryConnectionResponse, error) { - return c.queryConnection(int64(ctx.Height().GetRevisionHeight()), false) +func (c *Chain) QueryConnection(ctx core.QueryContext, connectionID string) (*conntypes.QueryConnectionResponse, error) { + return c.queryConnection(int64(ctx.Height().GetRevisionHeight()), connectionID, false) } -func (c *Chain) queryConnection(height int64, prove bool) (*conntypes.QueryConnectionResponse, error) { - res, err := connutils.QueryConnection(c.CLIContext(height), 
c.PathEnd.ConnectionID, prove) +func (c *Chain) queryConnection(height int64, connectionID string, prove bool) (*conntypes.QueryConnectionResponse, error) { + res, err := connutils.QueryConnection(c.CLIContext(height), connectionID, prove) if err != nil && strings.Contains(err.Error(), "not found") { return emptyConnRes, nil } else if err != nil { @@ -354,11 +356,13 @@ func (c *Chain) queryWrittenAcknowledgement(ctx core.QueryContext, seq uint64) ( } // QueryTxs returns an array of transactions given a tag -func (c *Chain) QueryTxs(height int64, page, limit int, events []string) ([]*ctypes.ResultTx, error) { +func (c *Chain) QueryTxs(maxHeight int64, page, limit int, events []string) ([]*ctypes.ResultTx, error) { if len(events) == 0 { return nil, errors.New("must declare at least one event to search") } + events = append(events, fmt.Sprintf("tx.height<=%d", maxHeight)) + if page <= 0 { return nil, errors.New("page must greater than 0") } @@ -374,6 +378,65 @@ func (c *Chain) QueryTxs(height int64, page, limit int, events []string) ([]*cty return res.Txs, nil } +func (c *Chain) QueryChannelUpgrade(ctx core.QueryContext) (*chantypes.QueryUpgradeResponse, error) { + return c.queryChannelUpgrade(int64(ctx.Height().GetRevisionHeight()), false) +} + +func (c *Chain) queryChannelUpgrade(height int64, prove bool) (chanRes *chantypes.QueryUpgradeResponse, err error) { + if res, err := chanutils.QueryUpgrade( + c.CLIContext(height), + c.PathEnd.PortID, + c.PathEnd.ChannelID, + prove, + ); err != nil { + if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { + return nil, nil + } else { + return nil, err + } + } else { + return res, nil + } +} + +func (c *Chain) QueryChannelUpgradeError(ctx core.QueryContext) (*chantypes.QueryUpgradeErrorResponse, error) { + return c.queryChannelUpgradeError(int64(ctx.Height().GetRevisionHeight()), false) +} + +func (c *Chain) queryChannelUpgradeError(height int64, prove bool) (chanRes *chantypes.QueryUpgradeErrorResponse, err 
error) { + if res, err := chanutils.QueryUpgradeError( + c.CLIContext(height), + c.PathEnd.PortID, + c.PathEnd.ChannelID, + prove, + ); err != nil { + if st, ok := status.FromError(err); ok && st.Code() == codes.NotFound { + return nil, nil + } else { + return nil, err + } + } else { + return res, nil + } +} + +func (c *Chain) QueryCanTransitionToFlushComplete(ctx core.QueryContext) (bool, error) { + return c.queryCanTransitionToFlushComplete(int64(ctx.Height().GetRevisionHeight())) +} + +func (c *Chain) queryCanTransitionToFlushComplete(height int64) (bool, error) { + queryClient := chantypes.NewQueryClient(c.CLIContext(height)) + req := chantypes.QueryPacketCommitmentsRequest{ + PortId: c.PathEnd.PortID, + ChannelId: c.PathEnd.ChannelID, + } + if res, err := queryClient.PacketCommitments(context.TODO(), &req); err != nil { + return false, err + } else { + return len(res.Commitments) == 0, nil + } +} + ///////////////////////////////////// // STAKING -> HistoricalInfo // ///////////////////////////////////// @@ -470,3 +533,18 @@ func recvPacketQuery(channelID string, seq int) []string { func writeAckQuery(channelID string, seq int) []string { return []string{fmt.Sprintf("%s.packet_dst_channel='%s'", waTag, channelID), fmt.Sprintf("%s.packet_sequence='%d'", waTag, seq)} } + +func channelUpgradeErrorQuery(channelID string, upgradeSequence uint64) []string { + return []string{ + fmt.Sprintf("%s.%s='%s'", + chantypes.EventTypeChannelUpgradeError, + chantypes.AttributeKeyChannelID, + channelID, + ), + fmt.Sprintf("%s.%s='%d'", + chantypes.EventTypeChannelUpgradeError, + chantypes.AttributeKeyUpgradeSequence, + upgradeSequence, + ), + } +} diff --git a/cmd/query.go b/cmd/query.go index 6d3ed90f..364f882d 100644 --- a/cmd/query.go +++ b/cmd/query.go @@ -7,13 +7,14 @@ import ( "github.com/cosmos/cosmos-sdk/client/flags" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/gogoproto/jsonpb" clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" + 
chantypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" ibcexported "github.com/cosmos/ibc-go/v8/modules/core/exported" "github.com/hyperledger-labs/yui-relayer/config" "github.com/hyperledger-labs/yui-relayer/core" "github.com/hyperledger-labs/yui-relayer/helpers" "github.com/spf13/cobra" - "github.com/cosmos/gogoproto/jsonpb" ) // queryCmd represents the chain command @@ -33,6 +34,7 @@ func queryCmd(ctx *config.Context) *cobra.Command { queryClientCmd(ctx), queryConnection(ctx), queryChannel(ctx), + queryChannelUpgrade(ctx), ) return cmd @@ -69,7 +71,7 @@ func queryClientCmd(ctx *config.Context) *cobra.Command { } marshaler := jsonpb.Marshaler{} if json, err := marshaler.MarshalToString(cs); err != nil { - fmt.Println(cs.String()) + return err } else { fmt.Println(json) } @@ -101,13 +103,13 @@ func queryConnection(ctx *config.Context) *cobra.Command { return err } queryHeight := clienttypes.NewHeight(latestHeight.GetRevisionNumber(), uint64(height)) - res, err := c.QueryConnection(core.NewQueryContext(context.TODO(), queryHeight)) + res, err := c.QueryConnection(core.NewQueryContext(context.TODO(), queryHeight), c.Path().ConnectionID) if err != nil { return err } marshaler := jsonpb.Marshaler{} if json, err := marshaler.MarshalToString(res.Connection); err != nil { - fmt.Println(res.Connection.String()) + return err } else { fmt.Println(json) } @@ -121,7 +123,7 @@ func queryConnection(ctx *config.Context) *cobra.Command { func queryChannel(ctx *config.Context) *cobra.Command { cmd := &cobra.Command{ Use: "channel [path-name] [chain-id]", - Short: "Query the connection state for the given connection id", + Short: "Query the channel state for the given connection id", Args: cobra.ExactArgs(2), RunE: func(cmd *cobra.Command, args []string) error { chains, _, _, err := ctx.Config.ChainsFromPath(args[0]) @@ -143,9 +145,51 @@ func queryChannel(ctx *config.Context) *cobra.Command { if err != nil { return err } + marshaler := jsonpb.Marshaler{} if json, err 
:= marshaler.MarshalToString(res.Channel); err != nil { - fmt.Println(res.Channel.String()) + return err + } else { + fmt.Println(json) + } + return nil + }, + } + + return heightFlag(cmd) +} + +func queryChannelUpgrade(ctx *config.Context) *cobra.Command { + cmd := &cobra.Command{ + Use: "channel-upgrade [path-name] [chain-id]", + Short: "Query the channel upgrade state for the given channel id", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + chains, _, _, err := ctx.Config.ChainsFromPath(args[0]) + if err != nil { + return err + } + c := chains[args[1]] + + height, err := cmd.Flags().GetUint64(flags.FlagHeight) + if err != nil { + return err + } + latestHeight, err := c.LatestHeight() + if err != nil { + return err + } + queryHeight := clienttypes.NewHeight(latestHeight.GetRevisionNumber(), uint64(height)) + res, err := c.QueryChannelUpgrade(core.NewQueryContext(context.TODO(), queryHeight)) + if err != nil { + return err + } else if res == nil { + res = &chantypes.QueryUpgradeResponse{} + } + + marshaler := jsonpb.Marshaler{} + if json, err := marshaler.MarshalToString(&res.Upgrade); err != nil { + return err } else { fmt.Println(json) } diff --git a/cmd/tx.go b/cmd/tx.go index cb554a20..59400686 100644 --- a/cmd/tx.go +++ b/cmd/tx.go @@ -2,15 +2,19 @@ package cmd import ( "context" + "errors" "fmt" "strings" + "time" "github.com/cosmos/cosmos-sdk/client/flags" clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" + chantypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" "github.com/cosmos/ibc-go/v8/modules/core/exported" "github.com/hyperledger-labs/yui-relayer/config" "github.com/hyperledger-labs/yui-relayer/core" "github.com/spf13/cobra" + "github.com/spf13/pflag" "github.com/spf13/viper" ) @@ -35,6 +39,7 @@ func transactionCmd(ctx *config.Context) *cobra.Command { updateClientsCmd(ctx), createConnectionCmd(ctx), createChannelCmd(ctx), + channelUpgradeCmd(ctx), ) return cmd @@ -197,6 
+202,217 @@ func createChannelCmd(ctx *config.Context) *cobra.Command { return timeoutFlag(cmd) } +func channelUpgradeCmd(ctx *config.Context) *cobra.Command { + cmd := &cobra.Command{ + Use: "channel-upgrade", + Short: "execute operations related to IBC channel upgrade", + Long: "This command is meant to be used to upgrade a channel between two chains with a configured path in the config file", + RunE: noCommand, + } + + cmd.AddCommand( + channelUpgradeInitCmd(ctx), + channelUpgradeExecuteCmd(ctx), + channelUpgradeCancelCmd(ctx), + ) + + return cmd +} + +func channelUpgradeInitCmd(ctx *config.Context) *cobra.Command { + const ( + flagOrdering = "ordering" + flagConnectionHops = "connection-hops" + flagVersion = "version" + flagUnsafe = "unsafe" + ) + + cmd := cobra.Command{ + Use: "init [path-name] [chain-id]", + Short: "execute chanUpgradeInit", + Long: "This command is meant to be used to initialize an IBC channel upgrade on a configured chain", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + pathName := args[0] + chainID := args[1] + + chains, srcID, dstID, err := ctx.Config.ChainsFromPath(pathName) + if err != nil { + return err + } + + var chain, cp *core.ProvableChain + switch chainID { + case srcID: + chain = chains[srcID] + cp = chains[dstID] + case dstID: + chain = chains[dstID] + cp = chains[srcID] + default: + return fmt.Errorf("unknown chain ID: %s", chainID) + } + + // check cp state + if unsafe, err := cmd.Flags().GetBool(flagUnsafe); err != nil { + return err + } else if !unsafe { + if height, err := cp.LatestHeight(); err != nil { + return err + } else if chann, err := cp.QueryChannel(core.NewQueryContext(cmd.Context(), height)); err != nil { + return err + } else if state := chann.Channel.State; state >= chantypes.FLUSHING && state <= chantypes.FLUSHCOMPLETE { + return fmt.Errorf("stop channel upgrade initialization because the counterparty is in %v state", state) + } + } + + // get ordering from flags + 
ordering, err := getOrderFromFlags(cmd.Flags(), flagOrdering) + if err != nil { + return err + } else if ordering == chantypes.NONE { + return errors.New("NONE is unacceptable channel ordering") + } + + // get connection hops from flags + connHops, err := cmd.Flags().GetStringSlice(flagConnectionHops) + if err != nil { + return err + } + + // get version from flags + version, err := cmd.Flags().GetString(flagVersion) + if err != nil { + return err + } + + return core.InitChannelUpgrade(chain, chantypes.UpgradeFields{ + Ordering: ordering, + ConnectionHops: connHops, + Version: version, + }) + }, + } + + cmd.Flags().String(flagOrdering, "", "channel ordering applied for the new channel") + cmd.Flags().StringSlice(flagConnectionHops, nil, "connection hops applied for the new channel") + cmd.Flags().String(flagVersion, "", "channel version applied for the new channel") + cmd.Flags().Bool(flagUnsafe, false, "set true if you want to allow for initializing a new channel upgrade even though the counterparty chain is still flushing packets.") + + return &cmd +} + +func channelUpgradeExecuteCmd(ctx *config.Context) *cobra.Command { + const ( + flagInterval = "interval" + flagTargetSrcState = "target-src-state" + flagTargetDstState = "target-dst-state" + ) + + const ( + defaultInterval = time.Second + defaultTargetState = "UNINIT" + ) + + cmd := cobra.Command{ + Use: "execute [path-name]", + Short: "execute channel upgrade handshake", + Long: "This command is meant to be used to execute an IBC channel upgrade handshake between two chains with a configured path in the config file", + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + pathName := args[0] + chains, srcChainID, dstChainID, err := ctx.Config.ChainsFromPath(pathName) + if err != nil { + return err + } + + src, ok := chains[srcChainID] + if !ok { + panic("src chain not found") + } + dst, ok := chains[dstChainID] + if !ok { + panic("dst chain not found") + } + + interval, err := 
cmd.Flags().GetDuration(flagInterval) + if err != nil { + return err + } + + targetSrcState, err := getUpgradeStateFromFlags(cmd.Flags(), flagTargetSrcState) + if err != nil { + return err + } + + targetDstState, err := getUpgradeStateFromFlags(cmd.Flags(), flagTargetDstState) + if err != nil { + return err + } + + return core.ExecuteChannelUpgrade(pathName, src, dst, interval, targetSrcState, targetDstState) + }, + } + + cmd.Flags().Duration(flagInterval, defaultInterval, "the interval between attempts to proceed the upgrade handshake") + cmd.Flags().String(flagTargetSrcState, defaultTargetState, "the source channel's upgrade state to be reached") + cmd.Flags().String(flagTargetDstState, defaultTargetState, "the destination channel's upgrade state to be reached") + + return &cmd +} + +func channelUpgradeCancelCmd(ctx *config.Context) *cobra.Command { + const ( + flagSettlementInterval = "settlement-interval" + ) + + cmd := cobra.Command{ + Use: "cancel [path-name] [chain-id]", + Short: "execute chanUpgradeCancel", + Long: "This command is meant to be used to cancel an IBC channel upgrade on a configured chain", + Args: cobra.ExactArgs(2), + RunE: func(cmd *cobra.Command, args []string) error { + pathName := args[0] + chainID := args[1] + + settlementInterval, err := cmd.Flags().GetDuration(flagSettlementInterval) + if err != nil { + return err + } + + _, srcChainID, dstChainID, err := ctx.Config.ChainsFromPath(pathName) + if err != nil { + return err + } + + var cpChainID string + switch chainID { + case srcChainID: + cpChainID = dstChainID + case dstChainID: + cpChainID = srcChainID + default: + return fmt.Errorf("invalid chain ID: %s or %s was expected, but %s was given", srcChainID, dstChainID, chainID) + } + + chain, err := ctx.Config.GetChain(chainID) + if err != nil { + return err + } + cp, err := ctx.Config.GetChain(cpChainID) + if err != nil { + return err + } + + return core.CancelChannelUpgrade(chain, cp, settlementInterval) + }, + } + + 
cmd.Flags().Duration(flagSettlementInterval, 10*time.Second, "time interval between attemts to query for settled channel/upgrade states") + + return &cmd +} + func relayMsgsCmd(ctx *config.Context) *cobra.Command { const ( flagDoRefresh = "do-refresh" @@ -372,3 +588,38 @@ func getUint64Slice(key string) []uint64 { } return ret } + +func getUpgradeStateFromFlags(flags *pflag.FlagSet, flagName string) (core.UpgradeState, error) { + s, err := flags.GetString(flagName) + if err != nil { + return 0, err + } + + switch strings.ToUpper(s) { + case "UNINIT": + return core.UPGRADE_STATE_UNINIT, nil + case "INIT": + return core.UPGRADE_STATE_INIT, nil + case "FLUSHING": + return core.UPGRADE_STATE_FLUSHING, nil + case "FLUSHCOMPLETE": + return core.UPGRADE_STATE_FLUSHCOMPLETE, nil + default: + return 0, fmt.Errorf("invalid upgrade state specified: %s", s) + } +} + +func getOrderFromFlags(flags *pflag.FlagSet, flagName string) (chantypes.Order, error) { + s, err := flags.GetString(flagName) + if err != nil { + return 0, err + } + + s = "ORDER_" + strings.ToUpper(s) + value, ok := chantypes.Order_value[s] + if !ok { + return 0, fmt.Errorf("invalid channel order specified: %s", s) + } + + return chantypes.Order(value), nil +} diff --git a/config/core.go b/config/core.go index 08055b99..d3d928e2 100644 --- a/config/core.go +++ b/config/core.go @@ -19,31 +19,37 @@ func initCoreConfig(c *Config) { core.SetCoreConfig(config) } -func (c CoreConfig) UpdateConfigID(pathName string, chainID string, configID core.ConfigIDType, id string) error { +func (c CoreConfig) UpdatePathConfig(pathName string, chainID string, kv map[core.PathConfigKey]string) error { configPath, err := c.config.Paths.Get(pathName) if err != nil { return err } + var pathEnd *core.PathEnd if chainID == configPath.Src.ChainID { pathEnd = configPath.Src - } - if chainID == configPath.Dst.ChainID { + } else if chainID == configPath.Dst.ChainID { pathEnd = configPath.Dst - } - if pathEnd == nil { + } else { return 
fmt.Errorf("pathEnd is nil") } - switch configID { - case core.ConfigIDClient: - pathEnd.ClientID = id - case core.ConfigIDConnection: - pathEnd.ConnectionID = id - case core.ConfigIDChannel: - pathEnd.ChannelID = id - } - if err := c.config.OverWriteConfig(); err != nil { - return err + + for k, v := range kv { + switch k { + case core.PathConfigClientID: + pathEnd.ClientID = v + case core.PathConfigConnectionID: + pathEnd.ConnectionID = v + case core.PathConfigChannelID: + pathEnd.ChannelID = v + case core.PathConfigOrder: + pathEnd.Order = v + case core.PathConfigVersion: + pathEnd.Version = v + default: + panic(fmt.Sprintf("unexpected path config key: %s", k)) + } } - return nil + + return c.config.OverWriteConfig() } diff --git a/core/chain.go b/core/chain.go index b5018d03..684b4fcf 100644 --- a/core/chain.go +++ b/core/chain.go @@ -136,7 +136,7 @@ type ICS02Querier interface { // ICS03Querier is an interface to the state of ICS-03 type ICS03Querier interface { // QueryConnection returns the remote end of a given connection - QueryConnection(ctx QueryContext) (*conntypes.QueryConnectionResponse, error) + QueryConnection(ctx QueryContext, connectionID string) (*conntypes.QueryConnectionResponse, error) } // ICS04Querier is an interface to the state of ICS-04 @@ -155,6 +155,18 @@ type ICS04Querier interface { // QueryUnfinalizedRelayedAcknowledgements returns acks and heights that are sent but not received at the latest finalized block on the counterpartychain QueryUnfinalizedRelayAcknowledgements(ctx QueryContext, counterparty LightClientICS04Querier) (PacketInfoList, error) + + // QueryChannelUpgrade returns the channel upgrade associated with a channelID + QueryChannelUpgrade(ctx QueryContext) (*chantypes.QueryUpgradeResponse, error) + + // QueryChannelUpgradeError returns the channel upgrade error receipt associated with a channelID at the height of `ctx`. 
+ // WARN: This error receipt may not be used to cancel upgrade in FLUSHCOMPLETE state because of upgrade sequence mismatch. + QueryChannelUpgradeError(ctx QueryContext) (*chantypes.QueryUpgradeErrorResponse, error) + + // QueryCanTransitionToFlushComplete returns the channel can transition to FLUSHCOMPLETE state. + // Basically it requires that there remains no inflight packets. + // Maybe additional condition for transition is required by the IBC/APP module. + QueryCanTransitionToFlushComplete(ctx QueryContext) (bool, error) } // ICS20Querier is an interface to the state of ICS-20 diff --git a/core/channel-upgrade.go b/core/channel-upgrade.go new file mode 100644 index 00000000..7930a393 --- /dev/null +++ b/core/channel-upgrade.go @@ -0,0 +1,747 @@ +package core + +import ( + "context" + "errors" + "fmt" + "log/slog" + "time" + + retry "github.com/avast/retry-go" + sdk "github.com/cosmos/cosmos-sdk/types" + clienttypes "github.com/cosmos/ibc-go/v8/modules/core/02-client/types" + chantypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" + "github.com/hyperledger-labs/yui-relayer/log" +) + +type UpgradeState int + +const ( + UPGRADE_STATE_UNINIT UpgradeState = iota + UPGRADE_STATE_INIT + UPGRADE_STATE_FLUSHING + UPGRADE_STATE_FLUSHCOMPLETE +) + +func (state UpgradeState) String() string { + switch state { + case UPGRADE_STATE_UNINIT: + return "UNINIT" + case UPGRADE_STATE_INIT: + return "INIT" + case UPGRADE_STATE_FLUSHING: + return "FLUSHING" + case UPGRADE_STATE_FLUSHCOMPLETE: + return "FLUSHCOMPLETE" + default: + panic(fmt.Errorf("unexpected UpgradeState: %d", state)) + } +} + +type UpgradeAction int + +const ( + UPGRADE_ACTION_NONE UpgradeAction = iota + UPGRADE_ACTION_TRY + UPGRADE_ACTION_ACK + UPGRADE_ACTION_CONFIRM + UPGRADE_ACTION_OPEN + UPGRADE_ACTION_CANCEL + UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE + UPGRADE_ACTION_TIMEOUT +) + +func (action UpgradeAction) String() string { + switch action { + case UPGRADE_ACTION_NONE: + return "NONE" + case 
UPGRADE_ACTION_TRY: + return "TRY" + case UPGRADE_ACTION_ACK: + return "ACK" + case UPGRADE_ACTION_CONFIRM: + return "CONFIRM" + case UPGRADE_ACTION_OPEN: + return "OPEN" + case UPGRADE_ACTION_CANCEL: + return "CANCEL" + case UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE: + return "CANCEL_FLUSHCOMPLETE" + case UPGRADE_ACTION_TIMEOUT: + return "TIMEOUT" + default: + panic(fmt.Errorf("unexpected UpgradeAction: %d", action)) + } +} + +// InitChannelUpgrade builds `MsgChannelUpgradeInit` based on the specified UpgradeFields and sends it to the specified chain. +func InitChannelUpgrade(chain *ProvableChain, upgradeFields chantypes.UpgradeFields) error { + logger := GetChannelLogger(chain.Chain) + defer logger.TimeTrack(time.Now(), "InitChannelUpgrade") + + addr, err := chain.GetAddress() + if err != nil { + logger.Error("failed to get address", err) + return err + } + + msg := chain.Path().ChanUpgradeInit(upgradeFields, addr) + + if _, err := chain.SendMsgs([]sdk.Msg{msg}); err != nil { + logger.Error("failed to send MsgChannelUpgradeInit", err) + return err + } else { + logger.Info("successfully initialized channel upgrade") + } + + return nil +} + +// ExecuteChannelUpgrade carries out channel upgrade handshake until both chains transition to the OPEN state. +// This function repeatedly checks the states of both chains and decides the next action. 
+func ExecuteChannelUpgrade(pathName string, src, dst *ProvableChain, interval time.Duration, targetSrcState, targetDstState UpgradeState) error { + logger := GetChannelPairLogger(src, dst) + defer logger.TimeTrack(time.Now(), "ExecuteChannelUpgrade") + + ticker := time.NewTicker(interval) + defer ticker.Stop() + + failures := 0 + firstCall := true + for { + if !firstCall { + <-ticker.C + } + + steps, err := upgradeChannelStep(src, dst, targetSrcState, targetDstState, firstCall) + if err != nil { + logger.Error("failed to create channel upgrade step", err) + return err + } + + firstCall = false + + if steps.Last { + logger.Info("Channel upgrade completed") + return nil + } + + if !steps.Ready() { + logger.Debug("Waiting for next channel upgrade step ...") + continue + } + + steps.Send(src, dst) + + if steps.Success() { + if err := SyncChainConfigsFromEvents(pathName, steps.SrcMsgIDs, steps.DstMsgIDs, src, dst); err != nil { + logger.Error("failed to synchronize the updated path config to the config file", err) + return err + } + + failures = 0 + } else { + if failures++; failures > 2 { + err := errors.New("channel upgrade failed") + logger.Error(err.Error(), err) + return err + } + + logger.Warn("Retrying transaction...") + } + } +} + +// CancelChannelUpgrade executes chanUpgradeCancel on `chain`. 
+func CancelChannelUpgrade(chain, cp *ProvableChain, settlementInterval time.Duration) error { + logger := GetChannelPairLogger(chain, cp) + defer logger.TimeTrack(time.Now(), "CancelChannelUpgrade") + + // wait for settlement + ticker := time.NewTicker(settlementInterval) + defer ticker.Stop() + + for { + sh, err := NewSyncHeaders(chain, cp) + if err != nil { + logger.Error("failed to create a SyncHeaders", err) + return err + } + ctx := sh.GetQueryContext(chain.ChainID()) + cpCtx := sh.GetQueryContext(cp.ChainID()) + + chann, _, settled, err := querySettledChannelPair(ctx, cpCtx, chain, cp, false) + if err != nil { + logger.Error("failed to query for settled channel pair", err) + return err + } else if !settled { + logger.Info("waiting for settlement of channel pair ...") + <-ticker.C + continue + } + + if _, _, settled, err := querySettledChannelUpgradePair(ctx, cpCtx, chain, cp, false); err != nil { + logger.Error("failed to query for settled channel upgrade pair", err) + return err + } else if !settled { + logger.Info("waiting for settlement of channel upgrade pair") + <-ticker.C + continue + } + + cpHeaders, err := cp.SetupHeadersForUpdate(chain, sh.GetLatestFinalizedHeader(cp.ChainID())) + if err != nil { + logger.Error("failed to set up headers for LC update", err) + return err + } + + upgErr, err := QueryChannelUpgradeError(cpCtx, cp, true) + if err != nil { + logger.Error("failed to query the channel upgrade error receipt", err) + return err + } else if chann.Channel.State == chantypes.FLUSHCOMPLETE && + (upgErr == nil || upgErr.ErrorReceipt.Sequence != chann.Channel.UpgradeSequence) { + var err error + if upgErr == nil { + err = fmt.Errorf("upgrade error receipt not found") + } else { + err = fmt.Errorf("upgrade sequences don't match: channel.upgrade_sequence=%d, error_receipt.sequence=%d", + chann.Channel.UpgradeSequence, upgErr.ErrorReceipt.Sequence) + } + logger.Error("cannot cancel the upgrade in FLUSHCOMPLETE state", err) + return err + } else if 
upgErr == nil { + // NOTE: Even if an error receipt is not found, anyway try to execute ChanUpgradeCancel. + // If the sender is authority and the channel state is anything other than FLUSHCOMPLETE, + // the cancellation will be successful. + upgErr = &chantypes.QueryUpgradeErrorResponse{} + } + + addr, err := chain.GetAddress() + if err != nil { + logger.Error("failed to get address", err) + return err + } + + var msgs []sdk.Msg + msgs = append(msgs, chain.Path().UpdateClients(cpHeaders, addr)...) + msgs = append(msgs, chain.Path().ChanUpgradeCancel(upgErr, addr)) + + // NOTE: A call of SendMsgs for each msg is executed separately to avoid using multicall for eth. + // This is just a workaround and should be fixed in the future. + for _, msg := range msgs { + if _, err := chain.SendMsgs([]sdk.Msg{msg}); err != nil { + logger.Error("failed to send a msg to cancel the channel upgrade", err) + return err + } + } + logger.Info("successfully cancelled the channel upgrade") + + return nil + } +} + +func NewUpgradeState(chanState chantypes.State, upgradeExists bool) (UpgradeState, error) { + switch chanState { + case chantypes.OPEN: + if upgradeExists { + return UPGRADE_STATE_INIT, nil + } else { + return UPGRADE_STATE_UNINIT, nil + } + case chantypes.FLUSHING: + return UPGRADE_STATE_FLUSHING, nil + case chantypes.FLUSHCOMPLETE: + return UPGRADE_STATE_FLUSHCOMPLETE, nil + default: + return 0, fmt.Errorf("channel not opened yet: state=%s", chanState) + } +} + +func upgradeChannelStep(src, dst *ProvableChain, targetSrcState, targetDstState UpgradeState, firstCall bool) (*RelayMsgs, error) { + logger := GetChannelPairLogger(src, dst) + logger = &log.RelayLogger{Logger: logger.With("first_call", firstCall)} + + if err := validatePaths(src, dst); err != nil { + logger.Error("failed to validate paths", err) + return nil, err + } + + out := NewRelayMsgs() + + // First, update the light clients to the latest header and return the header + sh, err := NewSyncHeaders(src, dst) + if 
err != nil { + logger.Error("failed to create SyncHeaders", err) + return nil, err + } + + // Query a number of things all at once + var srcUpdateHeaders, dstUpdateHeaders []Header + if err := retry.Do(func() error { + srcUpdateHeaders, dstUpdateHeaders, err = sh.SetupBothHeadersForUpdate(src, dst) + return err + }, rtyAtt, rtyDel, rtyErr, retry.OnRetry(func(uint, error) { + if err := sh.Updates(src, dst); err != nil { + panic(err) + } + })); err != nil { + logger.Error("failed to set up headers for LC update on both chains", err) + return nil, err + } + + srcCtx := sh.GetQueryContext(src.ChainID()) + dstCtx := sh.GetQueryContext(dst.ChainID()) + + // query finalized channels with proofs + srcChan, dstChan, settled, err := querySettledChannelPair(srcCtx, dstCtx, src, dst, true) + if err != nil { + logger.Error("failed to query the channel pair with proofs", err) + return nil, err + } else if !settled { + return out, nil + } + + // query finalized channel upgrades with proofs + srcChanUpg, dstChanUpg, settled, err := querySettledChannelUpgradePair( + srcCtx, + dstCtx, + src, + dst, + true, + ) + if err != nil { + logger.Error("failed to query the channel upgrade pair with proofs", err) + return nil, err + } else if !settled { + return out, nil + } + + // determine upgrade states + srcState, err := NewUpgradeState(srcChan.Channel.State, srcChanUpg != nil) + if err != nil { + logger.Error("failed to create UpgradeState of the src chain", err) + return nil, err + } + dstState, err := NewUpgradeState(dstChan.Channel.State, dstChanUpg != nil) + if err != nil { + logger.Error("failed to create UpgradeState of the dst chain", err) + return nil, err + } + + logger = &log.RelayLogger{Logger: logger.With( + slog.Group("current_channel_upgrade_states", + "src", srcState.String(), + "dst", dstState.String(), + ), + )} + + // check if both chains have reached the target states or UNINIT states + if !firstCall && srcState == UPGRADE_STATE_UNINIT && dstState == 
UPGRADE_STATE_UNINIT || + srcState != UPGRADE_STATE_UNINIT && dstState != UPGRADE_STATE_UNINIT && srcState == targetSrcState && dstState == targetDstState { + logger.Info("both chains have reached the target states") + out.Last = true + return out, nil + } + + // determine next actions for src/dst chains + srcAction := UPGRADE_ACTION_NONE + dstAction := UPGRADE_ACTION_NONE + switch { + case srcState == UPGRADE_STATE_UNINIT && dstState == UPGRADE_STATE_UNINIT: + return nil, errors.New("channel upgrade is not initialized") + case srcState == UPGRADE_STATE_INIT && dstState == UPGRADE_STATE_UNINIT: + if dstChan.Channel.UpgradeSequence >= srcChan.Channel.UpgradeSequence { + srcAction = UPGRADE_ACTION_CANCEL + } else { + dstAction = UPGRADE_ACTION_TRY + } + case srcState == UPGRADE_STATE_UNINIT && dstState == UPGRADE_STATE_INIT: + if srcChan.Channel.UpgradeSequence >= dstChan.Channel.UpgradeSequence { + dstAction = UPGRADE_ACTION_CANCEL + } else { + srcAction = UPGRADE_ACTION_TRY + } + case srcState == UPGRADE_STATE_UNINIT && dstState == UPGRADE_STATE_FLUSHING: + dstAction = UPGRADE_ACTION_CANCEL + case srcState == UPGRADE_STATE_FLUSHING && dstState == UPGRADE_STATE_UNINIT: + srcAction = UPGRADE_ACTION_CANCEL + case srcState == UPGRADE_STATE_UNINIT && dstState == UPGRADE_STATE_FLUSHCOMPLETE: + if complete, err := upgradeAlreadyComplete(srcChan, dstCtx, dst, dstChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the src side has already completed", err) + return nil, err + } else if complete { + dstAction = UPGRADE_ACTION_OPEN + } else if timedout, err := upgradeAlreadyTimedOut(srcCtx, src, dstChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the src side has already timed out", err) + return nil, err + } else if timedout { + dstAction = UPGRADE_ACTION_TIMEOUT + } else { + dstAction = UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE + } + case srcState == UPGRADE_STATE_FLUSHCOMPLETE && dstState == UPGRADE_STATE_UNINIT: + if complete, err := 
upgradeAlreadyComplete(dstChan, srcCtx, src, srcChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the dst side has already completed", err) + return nil, err + } else if complete { + srcAction = UPGRADE_ACTION_OPEN + } else if timedout, err := upgradeAlreadyTimedOut(dstCtx, dst, srcChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the dst side has already timed out", err) + return nil, err + } else if timedout { + srcAction = UPGRADE_ACTION_TIMEOUT + } else { + srcAction = UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE + } + case srcState == UPGRADE_STATE_INIT && dstState == UPGRADE_STATE_INIT: // crossing hellos + // it is intentional to execute chanUpgradeTry on both sides if upgrade sequences + // are identical to each other. this is for testing purpose. + if srcChan.Channel.UpgradeSequence >= dstChan.Channel.UpgradeSequence { + dstAction = UPGRADE_ACTION_TRY + } + if srcChan.Channel.UpgradeSequence <= dstChan.Channel.UpgradeSequence { + srcAction = UPGRADE_ACTION_TRY + } + case srcState == UPGRADE_STATE_INIT && dstState == UPGRADE_STATE_FLUSHING: + if srcChan.Channel.UpgradeSequence != dstChan.Channel.UpgradeSequence { + dstAction = UPGRADE_ACTION_CANCEL + } else { + // chanUpgradeAck checks if counterparty-specified timeout has exceeded. + // if it has, chanUpgradeAck aborts the upgrade handshake. + // Therefore the relayer need not check timeout by itself. + srcAction = UPGRADE_ACTION_ACK + } + case srcState == UPGRADE_STATE_FLUSHING && dstState == UPGRADE_STATE_INIT: + if srcChan.Channel.UpgradeSequence != dstChan.Channel.UpgradeSequence { + srcAction = UPGRADE_ACTION_CANCEL + } else { + // chanUpgradeAck checks if counterparty-specified timeout has exceeded. + // if it has, chanUpgradeAck aborts the upgrade handshake. + // Therefore the relayer need not check timeout by itself. 
+ dstAction = UPGRADE_ACTION_ACK + } + case srcState == UPGRADE_STATE_INIT && dstState == UPGRADE_STATE_FLUSHCOMPLETE: + if complete, err := upgradeAlreadyComplete(srcChan, dstCtx, dst, dstChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the src side has already completed", err) + return nil, err + } else if complete { + dstAction = UPGRADE_ACTION_OPEN + } else if timedout, err := upgradeAlreadyTimedOut(srcCtx, src, dstChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the src side has already timed out", err) + return nil, err + } else if timedout { + dstAction = UPGRADE_ACTION_TIMEOUT + } else { + dstAction = UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE + } + case srcState == UPGRADE_STATE_FLUSHCOMPLETE && dstState == UPGRADE_STATE_INIT: + if complete, err := upgradeAlreadyComplete(dstChan, srcCtx, src, srcChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the dst side has already completed", err) + return nil, err + } else if complete { + srcAction = UPGRADE_ACTION_OPEN + } else if timedout, err := upgradeAlreadyTimedOut(dstCtx, dst, srcChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the dst side has already timed out", err) + return nil, err + } else if timedout { + srcAction = UPGRADE_ACTION_TIMEOUT + } else { + srcAction = UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE + } + case srcState == UPGRADE_STATE_FLUSHING && dstState == UPGRADE_STATE_FLUSHING: + if timedout, err := upgradeAlreadyTimedOut(srcCtx, src, dstChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the src side has already timed out", err) + return nil, err + } else if timedout { + dstAction = UPGRADE_ACTION_TIMEOUT + } + if timedout, err := upgradeAlreadyTimedOut(dstCtx, dst, srcChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the dst side has already timed out", err) + return nil, err + } else if timedout { + srcAction = UPGRADE_ACTION_TIMEOUT + } + + // if either chain has 
already timed out, never execute chanUpgradeConfirm + if srcAction == UPGRADE_ACTION_TIMEOUT || dstAction == UPGRADE_ACTION_TIMEOUT { + break + } + + if completable, err := queryCanTransitionToFlushComplete(src); err != nil { + logger.Error("failed to check if the src channel can transition to FLUSHCOMPLETE", err) + return nil, err + } else if completable { + srcAction = UPGRADE_ACTION_CONFIRM + } + if completable, err := queryCanTransitionToFlushComplete(dst); err != nil { + logger.Error("failed to check if the dst channel can transition to FLUSHCOMPLETE", err) + return nil, err + } else if completable { + dstAction = UPGRADE_ACTION_CONFIRM + } + case srcState == UPGRADE_STATE_FLUSHING && dstState == UPGRADE_STATE_FLUSHCOMPLETE: + if timedout, err := upgradeAlreadyTimedOut(srcCtx, src, dstChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the src side has already timed out", err) + return nil, err + } else if timedout { + dstAction = UPGRADE_ACTION_TIMEOUT + } else if completable, err := queryCanTransitionToFlushComplete(src); err != nil { + logger.Error("failed to check if the src channel can transition to FLUSHCOMPLETE", err) + return nil, err + } else if completable { + srcAction = UPGRADE_ACTION_CONFIRM + } + case srcState == UPGRADE_STATE_FLUSHCOMPLETE && dstState == UPGRADE_STATE_FLUSHING: + if timedout, err := upgradeAlreadyTimedOut(dstCtx, dst, srcChanUpg); err != nil { + logger.Error("failed to check if the upgrade on the dst side has already timed out", err) + return nil, err + } else if timedout { + srcAction = UPGRADE_ACTION_TIMEOUT + } else if completable, err := queryCanTransitionToFlushComplete(dst); err != nil { + logger.Error("failed to check if the dst channel can transition to FLUSHCOMPLETE", err) + return nil, err + } else if completable { + dstAction = UPGRADE_ACTION_CONFIRM + } + case srcState == UPGRADE_STATE_FLUSHCOMPLETE && dstState == UPGRADE_STATE_FLUSHCOMPLETE: + srcAction = UPGRADE_ACTION_OPEN + dstAction = 
UPGRADE_ACTION_OPEN + default: + return nil, errors.New("unexpected state") + } + + logger = &log.RelayLogger{Logger: logger.With( + slog.Group("next_channel_upgrade_actions", + "src", srcAction.String(), + "dst", dstAction.String(), + ), + )} + + if srcAction != UPGRADE_ACTION_NONE { + addr := mustGetAddress(src) + + if len(dstUpdateHeaders) > 0 { + out.Src = append(out.Src, src.Path().UpdateClients(dstUpdateHeaders, addr)...) + } + + msg, err := buildActionMsg( + src, + srcAction, + srcChan, + addr, + dstCtx, + dst, + dstChan, + dstChanUpg, + ) + if err != nil { + logger.Error("failed to build Msg for the src chain", err) + return nil, err + } + + out.Src = append(out.Src, msg) + } + + if dstAction != UPGRADE_ACTION_NONE { + addr := mustGetAddress(dst) + + if len(srcUpdateHeaders) > 0 { + out.Dst = append(out.Dst, dst.Path().UpdateClients(srcUpdateHeaders, addr)...) + } + + msg, err := buildActionMsg( + dst, + dstAction, + dstChan, + addr, + srcCtx, + src, + srcChan, + srcChanUpg, + ) + if err != nil { + logger.Error("failed to build Msg for the dst chain", err) + return nil, err + } + + out.Dst = append(out.Dst, msg) + } + + logger.Info("successfully generates the next step of the channel upgrade") + return out, nil +} + +func queryProposedConnectionID(cpCtx QueryContext, cp *ProvableChain, cpChanUpg *chantypes.QueryUpgradeResponse) (string, error) { + if cpConn, err := cp.QueryConnection( + cpCtx, + cpChanUpg.Upgrade.Fields.ConnectionHops[0], + ); err != nil { + return "", err + } else { + return cpConn.Connection.Counterparty.ConnectionId, nil + } +} + +func queryCanTransitionToFlushComplete(chain interface { + ChainInfo + ICS04Querier +}) (bool, error) { + if h, err := chain.LatestHeight(); err != nil { + return false, err + } else { + return chain.QueryCanTransitionToFlushComplete(NewQueryContext(context.TODO(), h)) + } +} + +func querySettledChannelUpgradePair( + srcCtx, dstCtx QueryContext, + src, dst interface { + Chain + StateProver + }, + prove bool, +) 
(*chantypes.QueryUpgradeResponse, *chantypes.QueryUpgradeResponse, bool, error) { + logger := GetChannelPairLogger(src, dst) + logger = &log.RelayLogger{Logger: logger.With( + "src_height", srcCtx.Height().String(), + "dst_height", dstCtx.Height().String(), + "prove", prove, + )} + + // query channel upgrade pair at latest finalized heights + srcChanUpg, dstChanUpg, err := QueryChannelUpgradePair(srcCtx, dstCtx, src, dst, prove) + if err != nil { + logger.Error("failed to query a channel upgrade pair at the latest finalized heights", err) + return nil, nil, false, err + } + + // prepare QueryContext's based on the latest heights + var srcLatestCtx, dstLatestCtx QueryContext + if h, err := src.LatestHeight(); err != nil { + logger.Error("failed to get the latest height of the src chain", err) + return nil, nil, false, err + } else { + srcLatestCtx = NewQueryContext(context.TODO(), h) + } + if h, err := dst.LatestHeight(); err != nil { + logger.Error("failed to get the latest height of the dst chain", err) + return nil, nil, false, err + } else { + dstLatestCtx = NewQueryContext(context.TODO(), h) + } + + // query channel upgrade pair at latest heights + srcLatestChanUpg, dstLatestChanUpg, err := QueryChannelUpgradePair(srcLatestCtx, dstLatestCtx, src, dst, false) + if err != nil { + logger.Error("failed to query a channel upgrade pair at the latest heights", err) + return nil, nil, false, err + } + + if !compareUpgrades(srcChanUpg, srcLatestChanUpg) { + logger.Debug("src channel upgrade in transition") + return srcChanUpg, dstChanUpg, false, nil + } + if !compareUpgrades(dstChanUpg, dstLatestChanUpg) { + logger.Debug("dst channel upgrade in transition") + return srcChanUpg, dstChanUpg, false, nil + } + + return srcChanUpg, dstChanUpg, true, nil +} + +func compareUpgrades(a, b *chantypes.QueryUpgradeResponse) bool { + if a == nil { + return b == nil + } + if b == nil { + return false + } + return a.Upgrade.String() == b.Upgrade.String() +} + +func 
upgradeAlreadyComplete( + channel *chantypes.QueryChannelResponse, + cpCtx QueryContext, + cp *ProvableChain, + cpChanUpg *chantypes.QueryUpgradeResponse, +) (bool, error) { + proposedConnectionID, err := queryProposedConnectionID(cpCtx, cp, cpChanUpg) + if err != nil { + return false, err + } + result := channel.Channel.Version == cpChanUpg.Upgrade.Fields.Version && + channel.Channel.Ordering == cpChanUpg.Upgrade.Fields.Ordering && + channel.Channel.ConnectionHops[0] == proposedConnectionID + return result, nil +} + +func upgradeAlreadyTimedOut( + ctx QueryContext, + chain *ProvableChain, + cpChanUpg *chantypes.QueryUpgradeResponse, +) (bool, error) { + height := ctx.Height().(clienttypes.Height) + timestamp, err := chain.Timestamp(height) + if err != nil { + return false, err + } + return cpChanUpg.Upgrade.Timeout.Elapsed(height, uint64(timestamp.UnixNano())), nil +} + +// buildActionMsg builds and returns a MsgChannelUpgradeXXX message corresponding to `action`. +// The message is built from the counterparty's channel/upgrade states, proven at the counterparty query context `cpCtx`.
+func buildActionMsg( + chain *ProvableChain, + action UpgradeAction, + selfChan *chantypes.QueryChannelResponse, + addr sdk.AccAddress, + cpCtx QueryContext, + cp *ProvableChain, + cpChan *chantypes.QueryChannelResponse, + cpUpg *chantypes.QueryUpgradeResponse, +) (sdk.Msg, error) { + pathEnd := chain.Path() + + switch action { + case UPGRADE_ACTION_TRY: + proposedConnectionID, err := queryProposedConnectionID(cpCtx, cp, cpUpg) + if err != nil { + return nil, err + } + return pathEnd.ChanUpgradeTry(proposedConnectionID, cpChan, cpUpg, addr), nil + case UPGRADE_ACTION_ACK: + return pathEnd.ChanUpgradeAck(cpChan, cpUpg, addr), nil + case UPGRADE_ACTION_CONFIRM: + return pathEnd.ChanUpgradeConfirm(cpChan, cpUpg, addr), nil + case UPGRADE_ACTION_OPEN: + return pathEnd.ChanUpgradeOpen(cpChan, addr), nil + case UPGRADE_ACTION_CANCEL: + upgErr, err := QueryChannelUpgradeError(cpCtx, cp, true) + if err != nil { + return nil, err + } else if upgErr == nil { + // NOTE: Even if an error receipt is not found, anyway try to execute ChanUpgradeCancel. + // If the sender is authority and the channel state is anything other than FLUSHCOMPLETE, + // the cancellation will be successful. 
+ upgErr = &chantypes.QueryUpgradeErrorResponse{} + } + return pathEnd.ChanUpgradeCancel(upgErr, addr), nil + case UPGRADE_ACTION_CANCEL_FLUSHCOMPLETE: + upgErr, err := QueryChannelUpgradeError(cpCtx, cp, true) + if err != nil { + return nil, err + } else if upgErr == nil { + return nil, fmt.Errorf("upgrade error receipt not found") + } else if upgErr.ErrorReceipt.Sequence != selfChan.Channel.UpgradeSequence { + return nil, fmt.Errorf( + "upgrade sequences don't match: channel.upgrade_sequence=%d, error_receipt.sequence=%d", + selfChan.Channel.UpgradeSequence, upgErr.ErrorReceipt.Sequence) + } + return pathEnd.ChanUpgradeCancel(upgErr, addr), nil + case UPGRADE_ACTION_TIMEOUT: + return pathEnd.ChanUpgradeTimeout(cpChan, addr), nil + default: + panic(fmt.Errorf("unexpected action: %s", action)) + } +} diff --git a/core/channel.go b/core/channel.go index cfbec6d3..b7d61256 100644 --- a/core/channel.go +++ b/core/channel.go @@ -2,6 +2,7 @@ package core import ( "context" + "errors" "fmt" "log/slog" "time" @@ -41,39 +42,32 @@ func CreateChannel(pathName string, src, dst *ProvableChain, to time.Duration) e } chanSteps.Send(src, dst) + if chanSteps.Success() { + // In the case of success, synchronize the config file from generated channel identifiers if err := SyncChainConfigsFromEvents(pathName, chanSteps.SrcMsgIDs, chanSteps.DstMsgIDs, src, dst); err != nil { return err } - } - switch { - // In the case of success and this being the last transaction - // debug logging, log created connection and break - case chanSteps.Success() && chanSteps.Last: - logger.Info( - "★ Channel created", - ) - return nil - // In the case of success, reset the failures counter - case chanSteps.Success(): + // In the case of success and this being the last transaction + // debug logging, log created connection and break + if chanSteps.Last { + logger.Info("★ Channel created") + return nil + } + + // In the case of success, reset the failures counter failures = 0 - continue - // In the case 
of failure, increment the failures counter and exit if this is the 3rd failure - case !chanSteps.Success(): - failures++ - logger.Info("retrying transaction...") - time.Sleep(5 * time.Second) - if failures > 2 { - logger.Error( - "! Channel failed", - err, - ) - return fmt.Errorf("! Channel failed: [%s]chan{%s}port{%s} -> [%s]chan{%s}port{%s}", - src.ChainID(), src.Path().ChannelID, src.Path().PortID, - dst.ChainID(), dst.Path().ChannelID, dst.Path().PortID, - ) + } else { + // In the case of failure, increment the failures counter and exit if this is the 3rd failure + if failures++; failures > 2 { + err := errors.New("Channel handshake failed") + logger.Error(err.Error(), err) + return err } + + logger.Warn("Retrying transaction...") + time.Sleep(5 * time.Second) } } @@ -115,10 +109,10 @@ func checkChannelCreateReady(src, dst *ProvableChain, logger *log.RelayLogger) ( } if srcID != "" && srcState == chantypes.UNINITIALIZED { - return false, fmt.Errorf("src channel id is given but that channel does not exist: %s", srcID); + return false, fmt.Errorf("src channel id is given but that channel does not exist: %s", srcID) } if dstID != "" && dstState == chantypes.UNINITIALIZED { - return false, fmt.Errorf("dst channel id is given but that channel does not exist: %s", dstID); + return false, fmt.Errorf("dst channel id is given but that channel does not exist: %s", dstID) } if srcState == chantypes.OPEN && dstState == chantypes.OPEN { @@ -157,14 +151,16 @@ func createChannelStep(src, dst *ProvableChain) (*RelayMsgs, error) { return nil, err } - srcChan, dstChan, err := QueryChannelPair(sh.GetQueryContext(src.ChainID()), sh.GetQueryContext(dst.ChainID()), src, dst, true) + srcChan, dstChan, settled, err := querySettledChannelPair( + sh.GetQueryContext(src.ChainID()), + sh.GetQueryContext(dst.ChainID()), + src, + dst, + true, + ) if err != nil { return nil, err - } - - if finalized, err := checkChannelFinality(src, dst, srcChan.Channel, dstChan.Channel); err != nil { - 
return nil, err - } else if !finalized { + } else if !settled { return out, nil } @@ -250,29 +246,62 @@ func logChannelStates(src, dst *ProvableChain, srcChan, dstChan *chantypes.Query )) } -func checkChannelFinality(src, dst *ProvableChain, srcChannel, dstChannel *chantypes.Channel) (bool, error) { +func querySettledChannelPair( + srcCtx, dstCtx QueryContext, + src, dst interface { + Chain + StateProver + }, + prove bool, +) (*chantypes.QueryChannelResponse, *chantypes.QueryChannelResponse, bool, error) { logger := GetChannelPairLogger(src, dst) - sh, err := src.LatestHeight() + logger = &log.RelayLogger{Logger: logger.With( + "src_height", srcCtx.Height().String(), + "dst_height", dstCtx.Height().String(), + "prove", prove, + )} + + srcChan, dstChan, err := QueryChannelPair(srcCtx, dstCtx, src, dst, prove) if err != nil { - return false, err + logger.Error("failed to query channel pair at the latest finalized height", err) + return nil, nil, false, err } - dh, err := dst.LatestHeight() - if err != nil { - return false, err + + var srcLatestCtx, dstLatestCtx QueryContext + if h, err := src.LatestHeight(); err != nil { + logger.Error("failed to get the latest height of the src chain", err) + return nil, nil, false, err + } else { + srcLatestCtx = NewQueryContext(context.TODO(), h) + } + if h, err := dst.LatestHeight(); err != nil { + logger.Error("failed to get the latest height of the dst chain", err) + return nil, nil, false, err + } else { + dstLatestCtx = NewQueryContext(context.TODO(), h) } - srcChanLatest, dstChanLatest, err := QueryChannelPair(NewQueryContext(context.TODO(), sh), NewQueryContext(context.TODO(), dh), src, dst, false) + + srcLatestChan, dstLatestChan, err := QueryChannelPair(srcLatestCtx, dstLatestCtx, src, dst, false) if err != nil { - return false, err + logger.Error("failed to query channel pair at the latest height", err) + return nil, nil, false, err } - if srcChannel.State != srcChanLatest.Channel.State { - logger.Debug("src channel 
state in transition", "from_state", srcChannel.State, "to_state", srcChanLatest.Channel.State) - return false, nil + + if srcChan.Channel.String() != srcLatestChan.Channel.String() { + logger.Debug("src channel end in transition", + "from", srcChan.Channel.String(), + "to", srcLatestChan.Channel.String(), + ) + return srcChan, dstChan, false, nil } - if dstChannel.State != dstChanLatest.Channel.State { - logger.Debug("dst channel state in transition", "from_state", dstChannel.State, "to_state", dstChanLatest.Channel.State) - return false, nil + if dstChan.Channel.String() != dstLatestChan.Channel.String() { + logger.Debug("dst channel end in transition", + "from", dstChan.Channel.String(), + "to", dstLatestChan.Channel.String(), + ) + return srcChan, dstChan, false, nil } - return true, nil + return srcChan, dstChan, true, nil } func GetChannelLogger(c Chain) *log.RelayLogger { diff --git a/core/client.go b/core/client.go index 055d813d..e42e2af7 100644 --- a/core/client.go +++ b/core/client.go @@ -12,8 +12,8 @@ import ( ) func checkCreateClientsReady(src, dst *ProvableChain, logger *log.RelayLogger) (bool, error) { - srcID := src.Chain.Path().ClientID; - dstID := dst.Chain.Path().ClientID; + srcID := src.Chain.Path().ClientID + dstID := dst.Chain.Path().ClientID if srcID == "" && dstID == "" { return true, nil diff --git a/core/config.go b/core/config.go index ce1b8c9c..f7152108 100644 --- a/core/config.go +++ b/core/config.go @@ -1,28 +1,32 @@ package core import ( + "context" "encoding/json" "errors" "fmt" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/gogoproto/proto" + chantypes "github.com/cosmos/ibc-go/v8/modules/core/04-channel/types" "github.com/hyperledger-labs/yui-relayer/log" "github.com/hyperledger-labs/yui-relayer/utils" ) var config ConfigI -type ConfigIDType string +type PathConfigKey string const ( - ConfigIDClient ConfigIDType = "client" - ConfigIDConnection ConfigIDType = "connection" - ConfigIDChannel ConfigIDType = "channel" + 
PathConfigClientID PathConfigKey = "client-id" + PathConfigConnectionID PathConfigKey = "connection-id" + PathConfigChannelID PathConfigKey = "channel-id" + PathConfigOrder PathConfigKey = "order" + PathConfigVersion PathConfigKey = "version" ) type ConfigI interface { - UpdateConfigID(pathName string, chainID string, configID ConfigIDType, id string) error + UpdatePathConfig(pathName string, chainID string, kv map[PathConfigKey]string) error } func SetCoreConfig(c ConfigI) { @@ -156,23 +160,51 @@ func SyncChainConfigFromEvents(pathName string, msgIDs []MsgID, chain *ProvableC } for _, event := range msgRes.Events() { - var id string - var configID ConfigIDType + kv := make(map[PathConfigKey]string) + switch event := event.(type) { case *EventGenerateClientIdentifier: - configID = ConfigIDClient - id = event.ID + kv[PathConfigClientID] = event.ID case *EventGenerateConnectionIdentifier: - configID = ConfigIDConnection - id = event.ID + kv[PathConfigConnectionID] = event.ID case *EventGenerateChannelIdentifier: - configID = ConfigIDChannel - id = event.ID - } - if id != "" { - if err := config.UpdateConfigID(pathName, chain.ChainID(), configID, id); err != nil { - return err + kv[PathConfigChannelID] = event.ID + case *EventUpgradeChannel: + chann, err := chain.QueryChannel(NewQueryContext(context.TODO(), msgRes.BlockHeight())) + if err != nil { + return fmt.Errorf("failed to query a channel corresponding to the EventUpgradeChannel event: %v", err) + } + + if chann.Channel.UpgradeSequence != event.UpgradeSequence { + return fmt.Errorf("unexpected mismatch of upgrade sequence: channel.upgradeSequence=%d, event.upgradeSequence=%d", + chann.Channel.UpgradeSequence, + event.UpgradeSequence, + ) + } + + if len(chann.Channel.ConnectionHops) != 1 { + return fmt.Errorf("unexpected length of connectionHops: %d", len(chann.Channel.ConnectionHops)) } + + var order string + switch chann.Channel.Ordering { + case chantypes.ORDERED: + order = "ordered" + case chantypes.UNORDERED: 
+ order = "unordered" + default: + return fmt.Errorf("unexpected channel ordering: %d", chann.Channel.Ordering) + } + + kv[PathConfigConnectionID] = chann.Channel.ConnectionHops[0] + kv[PathConfigOrder] = order + kv[PathConfigVersion] = chann.Channel.Version + default: + continue + } + + if err := config.UpdatePathConfig(pathName, chain.ChainID(), kv); err != nil { + return err } } } diff --git a/core/connection.go b/core/connection.go index 0a9c4f96..3b9ee5ca 100644 --- a/core/connection.go +++ b/core/connection.go @@ -50,39 +50,32 @@ func CreateConnection(pathName string, src, dst *ProvableChain, to time.Duration } connSteps.Send(src, dst) + if connSteps.Success() { + // In the case of success, synchronize the config file from generated connection identifiers. if err := SyncChainConfigsFromEvents(pathName, connSteps.SrcMsgIDs, connSteps.DstMsgIDs, src, dst); err != nil { return err } - } - switch { - // In the case of success and this being the last transaction - // debug logging, log created connection and break - case connSteps.Success() && connSteps.Last: - logger.Info( - "★ Connection created", - ) - return nil - // In the case of success, reset the failures counter - case connSteps.Success(): + // In the case of success and this being the last transaction + // debug logging, log created connection and break + if connSteps.Last { + logger.Info("★ Connection created") + return nil + } + + // In the case of success, reset the failures counter failed = 0 - continue - // In the case of failure, increment the failures counter and exit if this is the 3rd failure - case !connSteps.Success(): - failed++ - logger.Info("retrying transaction...") - time.Sleep(5 * time.Second) - if failed > 2 { - logger.Error( - "! Connection failed", - errors.New("failed 3 times"), - ) - return fmt.Errorf("! 
Connection failed: [%s]client{%s}conn{%s} -> [%s]client{%s}conn{%s}", - src.ChainID(), src.Path().ClientID, src.Path().ConnectionID, - dst.ChainID(), dst.Path().ClientID, dst.Path().ConnectionID, - ) + } else { + // In the case of failure, increment the failures counter and exit if this is the 3rd failure + if failed++; failed > 2 { + err := errors.New("Connection handshake failed") + logger.Error(err.Error(), err) + return err } + + logger.Warn("Retrying transaction...") + time.Sleep(5 * time.Second) } } @@ -107,7 +100,7 @@ func checkConnectionCreateReady(src, dst *ProvableChain, logger *log.RelayLogger if err != nil { return conntypes.UNINITIALIZED, err } - res, err2 := pc.QueryConnection(NewQueryContext(context.TODO(), latestHeight)) + res, err2 := pc.QueryConnection(NewQueryContext(context.TODO(), latestHeight), pc.Path().ConnectionID) if err2 != nil { return conntypes.UNINITIALIZED, err2 } @@ -125,10 +118,10 @@ func checkConnectionCreateReady(src, dst *ProvableChain, logger *log.RelayLogger } if srcID != "" && srcState == conntypes.UNINITIALIZED { - return false, fmt.Errorf("src connection id is given but that connection does not exist: %s", srcID); + return false, fmt.Errorf("src connection id is given but that connection does not exist: %s", srcID) } if dstID != "" && dstState == conntypes.UNINITIALIZED { - return false, fmt.Errorf("dst connection id is given but that connection does not exist: %s", dstID); + return false, fmt.Errorf("dst connection id is given but that connection does not exist: %s", dstID) } if srcState == conntypes.OPEN && dstState == conntypes.OPEN { @@ -171,14 +164,16 @@ func createConnectionStep(src, dst *ProvableChain) (*RelayMsgs, error) { return nil, err } - srcConn, dstConn, err := QueryConnectionPair(sh.GetQueryContext(src.ChainID()), sh.GetQueryContext(dst.ChainID()), src, dst, true) + srcConn, dstConn, settled, err := querySettledConnectionPair( + sh.GetQueryContext(src.ChainID()), + sh.GetQueryContext(dst.ChainID()), + src, + 
dst, + true, + ) if err != nil { return nil, err - } - - if finalized, err := checkConnectionFinality(src, dst, srcConn.Connection, dstConn.Connection); err != nil { - return nil, err - } else if !finalized { + } else if !settled { return out, nil } @@ -334,29 +329,62 @@ func mustGetAddress(chain interface { return addr } -func checkConnectionFinality(src, dst *ProvableChain, srcConnection, dstConnection *conntypes.ConnectionEnd) (bool, error) { +func querySettledConnectionPair( + srcCtx, dstCtx QueryContext, + src, dst interface { + Chain + StateProver + }, + prove bool, +) (*conntypes.QueryConnectionResponse, *conntypes.QueryConnectionResponse, bool, error) { logger := GetConnectionPairLogger(src, dst) - sh, err := src.LatestHeight() + logger = &log.RelayLogger{Logger: logger.With( + "src_height", srcCtx.Height().String(), + "dst_height", dstCtx.Height().String(), + "prove", prove, + )} + + srcConn, dstConn, err := QueryConnectionPair(srcCtx, dstCtx, src, dst, prove) if err != nil { - return false, err + logger.Error("failed to query connection pair at the latest finalized height", err) + return nil, nil, false, err } - dh, err := dst.LatestHeight() - if err != nil { - return false, err + + var srcLatestCtx, dstLatestCtx QueryContext + if h, err := src.LatestHeight(); err != nil { + logger.Error("failed to get the latest height of the src chain", err) + return nil, nil, false, err + } else { + srcLatestCtx = NewQueryContext(context.TODO(), h) + } + if h, err := dst.LatestHeight(); err != nil { + logger.Error("failed to get the latest height of the dst chain", err) + return nil, nil, false, err + } else { + dstLatestCtx = NewQueryContext(context.TODO(), h) } - srcConnLatest, dstConnLatest, err := QueryConnectionPair(NewQueryContext(context.TODO(), sh), NewQueryContext(context.TODO(), dh), src, dst, false) + + srcLatestConn, dstLatestConn, err := QueryConnectionPair(srcLatestCtx, dstLatestCtx, src, dst, false) if err != nil { - return false, err + 
logger.Error("failed to query connection pair at the latest height", err) + return nil, nil, false, err } - if srcConnection.State != srcConnLatest.Connection.State { - logger.Debug("src connection state in transition", "from_state", srcConnection.State, "to_state", srcConnLatest.Connection.State) - return false, nil + + if srcConn.Connection.String() != srcLatestConn.Connection.String() { + logger.Debug("src connection end in transition", + "from", srcConn.Connection.String(), + "to", srcLatestConn.Connection.String(), + ) + return srcConn, dstConn, false, nil } - if dstConnection.State != dstConnLatest.Connection.State { - logger.Debug("dst connection state in transition", "from_state", dstConnection.State, "to_state", dstConnLatest.Connection.State) - return false, nil + if dstConn.Connection.String() != dstLatestConn.Connection.String() { + logger.Debug("dst connection end in transition", + "from", dstConn.Connection.String(), + "to", dstLatestConn.Connection.String(), + ) + return srcConn, dstConn, false, nil } - return true, nil + return srcConn, dstConn, true, nil } func GetConnectionPairLogger(src, dst Chain) *log.RelayLogger { diff --git a/core/msg.go b/core/msg.go index bdf9f6d9..8068059b 100644 --- a/core/msg.go +++ b/core/msg.go @@ -46,6 +46,7 @@ var ( _ MsgEventLog = (*EventRecvPacket)(nil) _ MsgEventLog = (*EventWriteAcknowledgement)(nil) _ MsgEventLog = (*EventAcknowledgePacket)(nil) + _ MsgEventLog = (*EventUpgradeChannel)(nil) _ MsgEventLog = (*EventUnknown)(nil) ) @@ -115,6 +116,15 @@ type EventAcknowledgePacket struct { TimeoutTimestamp time.Time } +// EventUpgradeChannel is an implementation of `MsgEventLog` that notifies the completion of a channel upgrade +type EventUpgradeChannel struct { + isMsgEventLog + + PortID string + ChannelID string + UpgradeSequence uint64 +} + // EventUnknown is an implementation of `MsgEventLog` that represents another event. 
type EventUnknown struct { isMsgEventLog diff --git a/core/pathEnd.go b/core/pathEnd.go index 4ffba512..e5ec4ad2 100644 --- a/core/pathEnd.go +++ b/core/pathEnd.go @@ -237,6 +237,117 @@ func (pe *PathEnd) ChanCloseConfirm(dstChanState *chantypes.QueryChannelResponse ) } +// ChanUpgradeInit creates a MsgChannelUpgradeInit +func (pe *PathEnd) ChanUpgradeInit(upgradeFields chantypes.UpgradeFields, signer sdk.AccAddress) sdk.Msg { + return chantypes.NewMsgChannelUpgradeInit( + pe.PortID, + pe.ChannelID, + upgradeFields, + signer.String(), + ) +} + +// ChanUpgradeTry creates a MsgChannelUpgradeTry +func (pe *PathEnd) ChanUpgradeTry( + newConnectionID string, + counterpartyChan *chantypes.QueryChannelResponse, + counterpartyUpg *chantypes.QueryUpgradeResponse, + signer sdk.AccAddress, +) sdk.Msg { + return chantypes.NewMsgChannelUpgradeTry( + pe.PortID, + pe.ChannelID, + []string{newConnectionID}, + counterpartyUpg.Upgrade.Fields, + counterpartyChan.Channel.UpgradeSequence, + counterpartyChan.Proof, + counterpartyUpg.Proof, + counterpartyChan.ProofHeight, + signer.String(), + ) +} + +// ChanUpgradeAck creates a MsgChannelUpgradeAck +func (pe *PathEnd) ChanUpgradeAck( + counterpartyChan *chantypes.QueryChannelResponse, + counterpartyUpg *chantypes.QueryUpgradeResponse, + signer sdk.AccAddress, +) sdk.Msg { + return chantypes.NewMsgChannelUpgradeAck( + pe.PortID, + pe.ChannelID, + counterpartyUpg.Upgrade, + counterpartyChan.Proof, + counterpartyUpg.Proof, + counterpartyChan.ProofHeight, + signer.String(), + ) +} + +// ChanUpgradeConfirm creates a MsgChannelUpgradeConfirm +func (pe *PathEnd) ChanUpgradeConfirm( + counterpartyChan *chantypes.QueryChannelResponse, + counterpartyUpg *chantypes.QueryUpgradeResponse, + signer sdk.AccAddress, +) sdk.Msg { + return chantypes.NewMsgChannelUpgradeConfirm( + pe.PortID, + pe.ChannelID, + counterpartyChan.Channel.State, + counterpartyUpg.Upgrade, + counterpartyChan.Proof, + counterpartyUpg.Proof, + counterpartyChan.ProofHeight, + 
signer.String(), + ) +} + +// ChanUpgradeOpen creates a MsgChannelUpgradeOpen +func (pe *PathEnd) ChanUpgradeOpen( + counterpartyChan *chantypes.QueryChannelResponse, + signer sdk.AccAddress, +) sdk.Msg { + return chantypes.NewMsgChannelUpgradeOpen( + pe.PortID, + pe.ChannelID, + counterpartyChan.Channel.State, + counterpartyChan.Channel.UpgradeSequence, + counterpartyChan.Proof, + counterpartyChan.ProofHeight, + signer.String(), + ) +} + +// ChanUpgradeCancel creates a MsgChannelUpgradeCancel +func (pe *PathEnd) ChanUpgradeCancel( + counterpartyChanUpgErr *chantypes.QueryUpgradeErrorResponse, + signer sdk.AccAddress, +) sdk.Msg { + return chantypes.NewMsgChannelUpgradeCancel( + pe.PortID, + pe.ChannelID, + counterpartyChanUpgErr.ErrorReceipt, + counterpartyChanUpgErr.Proof, + counterpartyChanUpgErr.ProofHeight, + signer.String(), + ) +} + +// ChanUpgradeTimeout creates a MsgChannelUpgradeTimeout +func (pe *PathEnd) ChanUpgradeTimeout( + counterpartyChan *chantypes.QueryChannelResponse, + signer sdk.AccAddress, +) sdk.Msg { + return chantypes.NewMsgChannelUpgradeTimeout( + pe.PortID, + pe.ChannelID, + *counterpartyChan.Channel, + counterpartyChan.Proof, + counterpartyChan.ProofHeight, + signer.String(), + ) +} + // MsgTransfer creates a new transfer message func (pe *PathEnd) MsgTransfer(dst *PathEnd, amount sdk.Coin, dstAddr string, signer sdk.AccAddress, timeoutHeight, timeoutTimestamp uint64, memo string) sdk.Msg { diff --git a/core/query.go b/core/query.go index b93d0a81..7e3c9fcd 100644 --- a/core/query.go +++ b/core/query.go @@ -127,7 +127,7 @@ func QueryConnectionPair( return nil } var err error - srcConn, err = src.QueryConnection(srcCtx) + srcConn, err = src.QueryConnection(srcCtx, src.Path().ConnectionID) if err != nil { return err } else if srcConn.Connection.State == conntypes.UNINITIALIZED { @@ -154,7 +154,7 @@ func QueryConnectionPair( return nil } var err error - dstConn, err = dst.QueryConnection(dstCtx) + dstConn, err = dst.QueryConnection(dstCtx, 
dst.Path().ConnectionID) if err != nil { return err } else if dstConn.Connection.State == conntypes.UNINITIALIZED { @@ -238,3 +238,77 @@ func QueryChannelPair(srcCtx, dstCtx QueryContext, src, dst interface { err = eg.Wait() return } + +func QueryChannelUpgradePair(srcCtx, dstCtx QueryContext, src, dst interface { + Chain + StateProver +}, prove bool) (srcChanUpg, dstChanUpg *chantypes.QueryUpgradeResponse, err error) { + eg := new(errgroup.Group) + + // get channel upgrade from src chain + eg.Go(func() error { + var err error + srcChanUpg, err = src.QueryChannelUpgrade(srcCtx) + if err != nil { + return err + } else if srcChanUpg == nil { + return nil + } + + if !prove { + return nil + } + + if value, err := src.Codec().Marshal(&srcChanUpg.Upgrade); err != nil { + return err + } else { + path := host.ChannelUpgradePath(src.Path().PortID, src.Path().ChannelID) + srcChanUpg.Proof, srcChanUpg.ProofHeight, err = src.ProveState(srcCtx, path, value) + return err + } + }) + + // get channel upgrade from dst chain + eg.Go(func() error { + var err error + dstChanUpg, err = dst.QueryChannelUpgrade(dstCtx) + if err != nil { + return err + } else if dstChanUpg == nil { + return nil + } + + if !prove { + return nil + } + + if value, err := dst.Codec().Marshal(&dstChanUpg.Upgrade); err != nil { + return err + } else { + path := host.ChannelUpgradePath(dst.Path().PortID, dst.Path().ChannelID) + dstChanUpg.Proof, dstChanUpg.ProofHeight, err = dst.ProveState(dstCtx, path, value) + return err + } + }) + err = eg.Wait() + return +} + +func QueryChannelUpgradeError(ctx QueryContext, chain interface { + Chain + StateProver +}, prove bool) (*chantypes.QueryUpgradeErrorResponse, error) { + if chanUpgErr, err := chain.QueryChannelUpgradeError(ctx); err != nil { + return nil, err + } else if chanUpgErr == nil { + return nil, nil + } else if !prove { + return chanUpgErr, nil + } else if value, err := chain.Codec().Marshal(&chanUpgErr.ErrorReceipt); err != nil { + return nil, err + } else { 
+ path := host.ChannelUpgradeErrorPath(chain.Path().PortID, chain.Path().ChannelID) + chanUpgErr.Proof, chanUpgErr.ProofHeight, err = chain.ProveState(ctx, path, value) + return chanUpgErr, err + } +} diff --git a/core/relayMsgs.go b/core/relayMsgs.go index cff5edee..6c52defd 100644 --- a/core/relayMsgs.go +++ b/core/relayMsgs.go @@ -1,8 +1,11 @@ package core import ( + "fmt" + sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/gogoproto/proto" + "github.com/hyperledger-labs/yui-relayer/log" ) // RelayMsgs contains the msgs that need to be sent to both a src and dst chain @@ -28,14 +31,7 @@ func NewRelayMsgs() *RelayMsgs { // Ready returns true if there are messages to relay func (r *RelayMsgs) Ready() bool { - if r == nil { - return false - } - - if len(r.Src) == 0 && len(r.Dst) == 0 { - return false - } - return true + return r != nil && (len(r.Src) > 0 || len(r.Dst) > 0) } // Success returns the success var @@ -76,10 +72,17 @@ func (r *RelayMsgs) Send(src, dst Chain) { txSize += uint64(len(bz)) if r.IsMaxTx(msgLen, txSize) { + logger := &log.RelayLogger{Logger: logger.With( + "msgs", msgsToLoggable(msgs), + "side", "src", + )} + // Submit the transactions to src chain and update its status msgIDs, err := src.SendMsgs(msgs) if err != nil { - logger.Error("failed to send msgs", err, "msgs", msgs) + logger.Error("failed to send msgs", err) + } else { + logger.Info("successfully sent msgs") } r.Succeeded = r.Succeeded && (err == nil) if err == nil { @@ -97,9 +100,16 @@ func (r *RelayMsgs) Send(src, dst Chain) { // submit leftover msgs if len(msgs) > 0 { + logger := &log.RelayLogger{Logger: logger.With( + "msgs", msgsToLoggable(msgs), + "side", "src", + )} + msgIDs, err := src.SendMsgs(msgs) if err != nil { - logger.Error("failed to send msgs", err, "msgs", msgs) + logger.Error("failed to send msgs", err) + } else { + logger.Info("successfully sent msgs") } r.Succeeded = r.Succeeded && (err == nil) if err == nil { @@ -125,10 +135,17 @@ func (r *RelayMsgs) 
Send(src, dst Chain) { txSize += uint64(len(bz)) if r.IsMaxTx(msgLen, txSize) { + logger := &log.RelayLogger{Logger: logger.With( + "msgs", msgsToLoggable(msgs), + "side", "dst", + )} + // Submit the transaction to dst chain and update its status msgIDs, err := dst.SendMsgs(msgs) if err != nil { - logger.Error("failed to send msgs", err, "msgs", msgs) + logger.Error("failed to send msgs", err) + } else { + logger.Info("successfully sent msgs") } r.Succeeded = r.Succeeded && (err == nil) if err == nil { @@ -146,9 +163,16 @@ func (r *RelayMsgs) Send(src, dst Chain) { // submit leftover msgs if len(msgs) > 0 { + logger := &log.RelayLogger{Logger: logger.With( + "msgs", msgsToLoggable(msgs), + "side", "dst", + )} + msgIDs, err := dst.SendMsgs(msgs) if err != nil { - logger.Error("failed to send msgs", err, "msgs", msgs) + logger.Error("failed to send msgs", err) + } else { + logger.Info("successfully sent msgs") } r.Succeeded = r.Succeeded && (err == nil) if err == nil { @@ -161,6 +185,14 @@ func (r *RelayMsgs) Send(src, dst Chain) { r.DstMsgIDs = dstMsgIDs } +func msgsToLoggable(msgs []sdk.Msg) []string { + var ret []string + for _, msg := range msgs { + ret = append(ret, fmt.Sprintf("%#v", msg)) + } + return ret +} + // Merge merges the argument into the receiver func (r *RelayMsgs) Merge(other *RelayMsgs) { r.Src = append(r.Src, other.Src...) 
diff --git a/tests/cases/tm2tm/Makefile b/tests/cases/tm2tm/Makefile index fd5f539d..7f853af5 100644 --- a/tests/cases/tm2tm/Makefile +++ b/tests/cases/tm2tm/Makefile @@ -12,6 +12,7 @@ test: ./scripts/fixture ./scripts/init-rly ./scripts/handshake + ./scripts/test-channel-upgrade ./scripts/test-create-client-success-single ./scripts/test-create-client-fail-already-created ./scripts/test-create-client-fail-unexist diff --git a/tests/cases/tm2tm/configs/path.json b/tests/cases/tm2tm/configs/path.json index 1cf5cddc..2d008d30 100644 --- a/tests/cases/tm2tm/configs/path.json +++ b/tests/cases/tm2tm/configs/path.json @@ -4,18 +4,18 @@ "client-id": "", "connection-id": "", "channel-id": "", - "port-id": "transfer", + "port-id": "mockapp", "order": "unordered", - "version": "ics20-1" + "version": "mockapp-1" }, "dst": { "chain-id": "ibc1", "client-id": "", "connection-id": "", "channel-id": "", - "port-id": "transfer", + "port-id": "mockapp", "order": "unordered", - "version": "ics20-1" + "version": "mockapp-1" }, "strategy": { "type": "naive" diff --git a/tests/cases/tm2tm/scripts/test-channel-upgrade b/tests/cases/tm2tm/scripts/test-channel-upgrade new file mode 100755 index 00000000..c03db195 --- /dev/null +++ b/tests/cases/tm2tm/scripts/test-channel-upgrade @@ -0,0 +1,141 @@ +#!/bin/bash + +set -eux + +source $(cd $(dirname "$0"); pwd)/../../../scripts/util + +SCRIPT_DIR=$(cd $(dirname $0); pwd) +RELAYER_CONF="$HOME/.yui-relayer" +RLY_BINARY=${SCRIPT_DIR}/../../../../build/yrly +RLY="${RLY_BINARY} --debug" + +CHAINID_ONE=ibc0 +CHAINID_TWO=ibc1 +RLYKEY=testkey +PATH_NAME=ibc01 + +# back up the original connection identifiers +srcOrigConnectionId=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].src."connection-id"') +dstOrigConnectionId=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."connection-id"') +srcOrigVersion=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" 
'.[$path_name].src."version"') +dstOrigVersion=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."version"') +srcOrigOrder=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].src."order"') +dstOrigOrder=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."order"') + +# back up the original config.json and make connection identifiers empty +origconfig=`mktemp` +cp "$RELAYER_CONF/config/config.json" $origconfig +$RLY paths edit $PATH_NAME src connection-id '' +$RLY paths edit $PATH_NAME dst connection-id '' + +# create a new connection and save the new connection identifiers +retry 5 $RLY tx connection $PATH_NAME +srcAltConnectionId=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].src."connection-id"') +dstAltConnectionId=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."connection-id"') +srcAltVersion=mockapp-999 +dstAltVersion=mockapp-999 +srcAltOrder=ordered +dstAltOrder=ordered + +# resume the original config.json +mv $origconfig "$RELAYER_CONF/config/config.json" + +checkResult() { + expectedSide=$1 + + srcConnectionId=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].src."connection-id"') + dstConnectionId=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."connection-id"') + srcVersion=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].src."version"') + dstVersion=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."version"') + srcOrder=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].src."order"') + dstOrder=$($RLY paths list --json | jq --raw-output --arg path_name "$PATH_NAME" '.[$path_name].dst."order"') + + if [ "$expectedSide" = orig ] + then + if [ "$srcConnectionId" != 
"$srcOrigConnectionId" -o "$dstConnectionId" != "$dstOrigConnectionId" -o "$srcVersion" != "$srcOrigVersion" -o "$dstVersion" != "$dstOrigVersion" -o "$srcOrder" != "$srcOrigOrder" -o "$dstOrder" != "$dstOrigOrder" ] + then + echo "path config is not equal to the original one: $srcConnectionId, $dstConnectionId, $srcVersion, $dstVersion, $srcOrder, $dstOrder" + exit 1 + fi + elif [ "$expectedSide" = alt ] + then + if [ "$srcConnectionId" != "$srcAltConnectionId" -o "$dstConnectionId" != "$dstAltConnectionId" -o "$srcVersion" != "$srcAltVersion" -o "$dstVersion" != "$dstAltVersion" -o "$srcOrder" != "$srcAltOrder" -o "$dstOrder" != "$dstAltOrder" ] + then + echo "path config is not equal to the alternative one: $srcConnectionId, $dstConnectionId, $srcVersion, $dstVersion, $srcOrder, $dstOrder" + exit 1 + fi + else + echo "expectedSide is invalid value: $expectedSide" + exit 1 + fi +} + +origSrcOpts="--ordering $srcOrigOrder --connection-hops $srcOrigConnectionId --version $srcOrigVersion" +origDstOpts="--ordering $dstOrigOrder --connection-hops $dstOrigConnectionId --version $dstOrigVersion" +altSrcOpts="--ordering $srcAltOrder --connection-hops $srcAltConnectionId --version $srcAltVersion" +altDstOpts="--ordering $dstAltOrder --connection-hops $dstAltConnectionId --version $dstAltVersion" + +echo '##### case 1 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $altSrcOpts +$RLY tx channel-upgrade init ibc01 ibc1 $altDstOpts +$RLY tx channel-upgrade execute ibc01 +checkResult alt + +echo '##### case 2 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $origSrcOpts +$RLY tx channel-upgrade init ibc01 ibc0 $origSrcOpts +$RLY tx channel-upgrade init ibc01 ibc1 $origDstOpts +$RLY tx channel-upgrade execute ibc01 +checkResult orig + +echo '##### case 3 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $altSrcOpts +$RLY tx channel-upgrade init ibc01 ibc1 $altDstOpts +$RLY tx channel-upgrade cancel ibc01 ibc0 # ibc0 returns to UNINIT. 
ibc0.error_receipt.sequence >= ibc1.channel.upgrade_sequence +$RLY tx channel-upgrade execute ibc01 # ibc1's upgrade should be cancelled +checkResult orig + +echo '##### case 4 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $altSrcOpts +$RLY tx channel-upgrade execute ibc01 --target-src-state INIT --target-dst-state FLUSHING +$RLY tx channel-upgrade cancel ibc01 ibc0 # ibc0 returns to UNINIT. ibc1 is FLUSHING. +$RLY tx channel-upgrade execute ibc01 # ibc1's upgrade should be cancelled +checkResult orig + +echo '##### case 5 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $altSrcOpts +$RLY tx channel-upgrade execute ibc01 --target-src-state INIT --target-dst-state FLUSHING +$RLY tx channel-upgrade cancel ibc01 ibc0 # ibc0 returns to UNINIT. ibc1 is FLUSHING. +$RLY tx channel-upgrade init ibc01 ibc0 --unsafe $altSrcOpts # ibc0 re-initiates new upgrade. +$RLY tx channel-upgrade execute ibc01 # The upgrade initiated by ibc0 should be completed after ibc1's one is cancelled. +checkResult alt + +echo '##### case 6 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $origSrcOpts +$RLY tx channel-upgrade execute ibc01 --target-src-state FLUSHCOMPLETE --target-dst-state FLUSHING +$RLY tx channel-upgrade cancel ibc01 ibc1 # ibc1 returns to UNINIT. ibc0 is FLUSHCOMPLETE. +$RLY tx channel-upgrade execute ibc01 # ibc0's upgrade (in FLUSHCOMPLETE) should be cancelled. +checkResult alt + +echo '##### case 7 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $origSrcOpts +$RLY tx channel-upgrade execute ibc01 --target-src-state FLUSHCOMPLETE --target-dst-state FLUSHING +$RLY tx channel-upgrade cancel ibc01 ibc1 # ibc1 returns to UNINIT. ibc0 is FLUSHCOMPLETE. +$RLY tx channel-upgrade init ibc01 ibc1 --unsafe $origDstOpts # ibc1 re-initiates new upgrade. +$RLY tx channel-upgrade execute ibc01 # The upgrade initiated by ibc1 should be completed after ibc0's one is cancelled. 
+checkResult orig + +echo '##### case 8 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $altSrcOpts +$RLY tx channel-upgrade execute ibc01 --target-src-state FLUSHCOMPLETE --target-dst-state FLUSHING +sleep 20 # ibc1 exceeds upgrade.timeout.timestamp +$RLY tx channel-upgrade execute ibc01 # ibc0 <= chanUpgradeTimeout, ibc1 <= chanUpgradeCancel +checkResult orig + +echo '##### case 9 #####' +$RLY tx channel-upgrade init ibc01 ibc0 $altSrcOpts +$RLY tx channel-upgrade init ibc01 ibc1 $altDstOpts +$RLY tx channel-upgrade execute ibc01 --target-src-state FLUSHING --target-dst-state FLUSHING +sleep 20 # Both chains exceed upgrade.timeout.timestamp +$RLY tx channel-upgrade execute ibc01 # ibc0,ibc1 <= chanUpgradeTimeout +checkResult orig diff --git a/tests/cases/tm2tm/scripts/test-create-channel-fail-unexist b/tests/cases/tm2tm/scripts/test-create-channel-fail-unexist index c811eab0..0f139e3f 100755 --- a/tests/cases/tm2tm/scripts/test-create-channel-fail-unexist +++ b/tests/cases/tm2tm/scripts/test-create-channel-fail-unexist @@ -35,7 +35,7 @@ r=$? if [ $r -eq 101 ]; then echo "$(basename $0): success" - cp $CONFIG.tmp $CONFIG + mv $CONFIG.tmp $CONFIG exit 0 else echo "$(basename $0): fail: $r" diff --git a/tests/cases/tm2tm/scripts/test-create-client-fail-unexist b/tests/cases/tm2tm/scripts/test-create-client-fail-unexist index 6a8b962d..f0534328 100755 --- a/tests/cases/tm2tm/scripts/test-create-client-fail-unexist +++ b/tests/cases/tm2tm/scripts/test-create-client-fail-unexist @@ -34,11 +34,11 @@ EOF r=$? 
if [ $r -eq 101 ]; then - echo "success" - cp $CONFIG.tmp $CONFIG + echo "$(basename $0): success" + mv $CONFIG.tmp $CONFIG exit 0 else - echo "fail: $r" + echo "$(basename $0): fail: $r" exit 1 fi diff --git a/tests/cases/tm2tm/scripts/test-create-client-success-single b/tests/cases/tm2tm/scripts/test-create-client-success-single index 87fa368e..a97d6297 100755 --- a/tests/cases/tm2tm/scripts/test-create-client-success-single +++ b/tests/cases/tm2tm/scripts/test-create-client-success-single @@ -53,4 +53,4 @@ if [ "$NEW_DST_CLIENT_ID" != "$OLD_DST_CLIENT_ID" ]; then exit 1 fi -cp $CONFIG.tmp $CONFIG +mv $CONFIG.tmp $CONFIG diff --git a/tests/cases/tm2tm/scripts/test-create-connection-fail-unexist b/tests/cases/tm2tm/scripts/test-create-connection-fail-unexist index c1472d79..022e2d0c 100755 --- a/tests/cases/tm2tm/scripts/test-create-connection-fail-unexist +++ b/tests/cases/tm2tm/scripts/test-create-connection-fail-unexist @@ -34,10 +34,11 @@ EOF r=$? if [ $r -eq 101 ]; then - echo "success" + echo "$(basename $0): success" + mv $CONFIG.tmp $CONFIG exit 0 else - echo "fail: $r" + echo "$(basename $0): fail: $r" exit 1 fi diff --git a/tests/cases/tm2tm/scripts/test-service b/tests/cases/tm2tm/scripts/test-service index 8b8a08f8..1c097e16 100755 --- a/tests/cases/tm2tm/scripts/test-service +++ b/tests/cases/tm2tm/scripts/test-service @@ -50,8 +50,9 @@ RLY_PID=$! 
echo "xxxxxxx ${SECONDS} relay service [packets = 0, time = 0] -> skip xxxxxx" sleep 4 -# transfer a token -${RLY} tx transfer ibc01 ibc0 ibc1 100samoleans ${TM_ADDRESS1} +# send a message via mockapp and wait for tx to be included +docker exec tendermint-chain0 sh -c "simd --home /root/data/ibc0 tx --keyring-backend=test --from ${TM_ADDRESS0} --chain-id ibc0 mockapp send mockapp channel-0 'mock packet data' --yes" +sleep 3 expectUnrelayedCount "unrelayed-packets" "src" 1 expectUnrelayedCount "unrelayed-acknowledgements" "dst" 0 @@ -60,7 +61,8 @@ sleep 13 echo "xxxxxxx ${SECONDS} relay service [packets = 1, time = 20] -> skip xxxxxx" sleep 4 -${RLY} tx transfer ibc01 ibc0 ibc1 100samoleans ${TM_ADDRESS1} +docker exec tendermint-chain0 sh -c "simd --home /root/data/ibc0 tx --keyring-backend=test --from ${TM_ADDRESS0} --chain-id ibc0 mockapp send mockapp channel-0 'mock packet data' --yes" +sleep 3 expectUnrelayedCount "unrelayed-packets" "src" 2 expectUnrelayedCount "unrelayed-acknowledgements" "dst" 0 @@ -72,9 +74,12 @@ sleep 4 expectUnrelayedCount "unrelayed-packets" "src" 0 expectUnrelayedCount "unrelayed-acknowledgements" "dst" 2 -${RLY} tx transfer ibc01 ibc0 ibc1 100samoleans ${TM_ADDRESS1} -${RLY} tx transfer ibc01 ibc0 ibc1 100samoleans ${TM_ADDRESS1} -${RLY} tx transfer ibc01 ibc0 ibc1 100samoleans ${TM_ADDRESS1} +docker exec tendermint-chain0 sh -c "simd --home /root/data/ibc0 tx --keyring-backend=test --from ${TM_ADDRESS0} --chain-id ibc0 mockapp send mockapp channel-0 'mock packet data' --yes" +sleep 3 +docker exec tendermint-chain0 sh -c "simd --home /root/data/ibc0 tx --keyring-backend=test --from ${TM_ADDRESS0} --chain-id ibc0 mockapp send mockapp channel-0 'mock packet data' --yes" +sleep 3 +docker exec tendermint-chain0 sh -c "simd --home /root/data/ibc0 tx --keyring-backend=test --from ${TM_ADDRESS0} --chain-id ibc0 mockapp send mockapp channel-0 'mock packet data' --yes" +sleep 3 expectUnrelayedCount "unrelayed-packets" "src" 3 
expectUnrelayedCount "unrelayed-acknowledgements" "dst" 2 diff --git a/tests/cases/tm2tm/scripts/test-tx b/tests/cases/tm2tm/scripts/test-tx index 49c5e65f..62005305 100755 --- a/tests/cases/tm2tm/scripts/test-tx +++ b/tests/cases/tm2tm/scripts/test-tx @@ -14,15 +14,16 @@ TM_ADDRESS1=$(${RLY} tendermint keys show ibc1 testkey) echo "!!! ibc0 -> ibc1 !!!" -echo "Before ibc0 balance: $(${RLY} query balance ibc0 ${TM_ADDRESS0})" -echo "Before ibc1 balance: $(${RLY} query balance ibc1 ${TM_ADDRESS1})" - -${RLY} tx transfer ibc01 ibc0 ibc1 100samoleans ${TM_ADDRESS1} +docker exec tendermint-chain0 sh -c "simd --home /root/data/ibc0 tx --keyring-backend=test --from ${TM_ADDRESS0} --chain-id ibc0 mockapp send mockapp channel-0 'mock packet data' --yes" sleep ${TX_INTERNAL} ${RLY} tx relay --do-refresh ibc01 --src-seqs 1 sleep ${TX_INTERNAL} ${RLY} tx acks --do-refresh ibc01 --dst-seqs 1 -sleep ${TX_INTERNAL} -echo "After ibc0 balance: $(${RLY} query balance ibc0 ${TM_ADDRESS0})" -echo "After ibc1 balance: $(${RLY} query balance ibc1 ${TM_ADDRESS1})" +echo "!!! ibc1 -> ibc0 !!!" 
+ +docker exec tendermint-chain1 sh -c "simd --home /root/data/ibc1 tx --keyring-backend=test --from ${TM_ADDRESS1} --chain-id ibc1 mockapp send mockapp channel-0 'mock packet data' --yes" +sleep ${TX_INTERNAL} +${RLY} tx relay --do-refresh ibc01 --dst-seqs 1 +sleep ${TX_INTERNAL} +${RLY} tx acks --do-refresh ibc01 --src-seqs 1 diff --git a/tests/chains/tendermint/Dockerfile b/tests/chains/tendermint/Dockerfile index a97b4869..7d1cbaa5 100644 --- a/tests/chains/tendermint/Dockerfile +++ b/tests/chains/tendermint/Dockerfile @@ -28,6 +28,8 @@ RUN ./scripts/tm-chain simd $CHAINID $CHAINDIR $RPCPORT $P2PPORT $PROFPORT $GRPC FROM alpine:${ALPINE_VER} +RUN apk add jq + WORKDIR /root ARG CHAINID diff --git a/tests/chains/tendermint/go.mod b/tests/chains/tendermint/go.mod index a753b290..91ad795f 100644 --- a/tests/chains/tendermint/go.mod +++ b/tests/chains/tendermint/go.mod @@ -21,6 +21,7 @@ require ( github.com/cosmos/gogoproto v1.4.11 github.com/cosmos/ibc-go/modules/capability v1.0.0 github.com/cosmos/ibc-go/v8 v8.2.0 + github.com/datachainlab/ibc-mock-app v0.1.0 github.com/datachainlab/ibc-mock-client v0.4.1 github.com/spf13/cast v1.6.0 github.com/spf13/cobra v1.8.0 diff --git a/tests/chains/tendermint/go.sum b/tests/chains/tendermint/go.sum index 49f456f6..e9887c66 100644 --- a/tests/chains/tendermint/go.sum +++ b/tests/chains/tendermint/go.sum @@ -383,6 +383,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/danieljoos/wincred v1.1.2 h1:QLdCxFs1/Yl4zduvBdcHB8goaYk9RARS2SgLLRuAyr0= github.com/danieljoos/wincred v1.1.2/go.mod h1:GijpziifJoIBfYh+S7BbkdUTU4LfM+QnGqR5Vl2tAx0= +github.com/datachainlab/ibc-mock-app v0.1.0 h1:lWgNj+7JlLsV4fDnp3YGvknRt0dAr8hyoFm1JJRmcWE= +github.com/datachainlab/ibc-mock-app v0.1.0/go.mod h1:wXISY/ZuXJter33Qv7Suul0WMteDP1UlRk+HixIjAHY= github.com/datachainlab/ibc-mock-client v0.4.1 
h1:FQfyFOodgnchCIicpS7Vzji3yxXDe4Jl5hmE5Vz7M1s= github.com/datachainlab/ibc-mock-client v0.4.1/go.mod h1:2wGddiF2uHFhiMBpSskzKT/wA8naXi5DLoXt1KEZA1o= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/tests/chains/tendermint/scripts/entrypoint.sh b/tests/chains/tendermint/scripts/entrypoint.sh index eaf13871..c92e58f8 100755 --- a/tests/chains/tendermint/scripts/entrypoint.sh +++ b/tests/chains/tendermint/scripts/entrypoint.sh @@ -1,3 +1,6 @@ #!/bin/sh +export IBC_AUTHORITY=`jq -r .address /root/${CHAINDIR}/${CHAINID}/key_seed.json` +export IBC_CHANNEL_UPGRADE_TIMEOUT=20000000000 # 20sec = 20_000_000_000nsec + simd --home /root/${CHAINDIR}/${CHAINID} start --pruning=nothing --grpc.address="0.0.0.0:${GRPCPORT}" diff --git a/tests/chains/tendermint/simapp/app.go b/tests/chains/tendermint/simapp/app.go index 31565db7..ee73ca1f 100644 --- a/tests/chains/tendermint/simapp/app.go +++ b/tests/chains/tendermint/simapp/app.go @@ -6,6 +6,7 @@ import ( "io" "os" "path/filepath" + "strconv" dbm "github.com/cosmos/cosmos-db" "github.com/cosmos/gogoproto/proto" @@ -135,6 +136,10 @@ import ( mockclient "github.com/datachainlab/ibc-mock-client/modules/light-clients/xx-mock" mockclienttypes "github.com/datachainlab/ibc-mock-client/modules/light-clients/xx-mock/types" + + mockapp "github.com/datachainlab/ibc-mock-app" + mockappkeeper "github.com/datachainlab/ibc-mock-app/keeper" + mockapptypes "github.com/datachainlab/ibc-mock-app/types" ) const appName = "SimApp" @@ -157,6 +162,7 @@ var ( stakingtypes.NotBondedPoolName: {authtypes.Burner, authtypes.Staking}, govtypes.ModuleName: {authtypes.Burner}, ibctransfertypes.ModuleName: {authtypes.Minter, authtypes.Burner}, + mockapptypes.ModuleName: nil, ibcfeetypes.ModuleName: nil, icatypes.ModuleName: nil, ibcmock.ModuleName: nil, @@ -202,6 +208,7 @@ type SimApp struct { ICAHostKeeper icahostkeeper.Keeper EvidenceKeeper evidencekeeper.Keeper TransferKeeper ibctransferkeeper.Keeper + 
MockAppKeeper mockappkeeper.Keeper FeeGrantKeeper feegrantkeeper.Keeper GroupKeeper groupkeeper.Keeper ConsensusParamsKeeper consensusparamkeeper.Keeper @@ -210,6 +217,7 @@ type SimApp struct { // make scoped keepers public for test purposes ScopedIBCKeeper capabilitykeeper.ScopedKeeper ScopedTransferKeeper capabilitykeeper.ScopedKeeper + ScopedMockAppKeeper capabilitykeeper.ScopedKeeper ScopedFeeMockKeeper capabilitykeeper.ScopedKeeper ScopedICAControllerKeeper capabilitykeeper.ScopedKeeper ScopedICAHostKeeper capabilitykeeper.ScopedKeeper @@ -305,7 +313,7 @@ func NewSimApp( authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, crisistypes.StoreKey, minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, govtypes.StoreKey, group.StoreKey, paramstypes.StoreKey, ibcexported.StoreKey, upgradetypes.StoreKey, feegrant.StoreKey, - evidencetypes.StoreKey, ibctransfertypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, capabilitytypes.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, mockapptypes.StoreKey, icacontrollertypes.StoreKey, icahosttypes.StoreKey, capabilitytypes.StoreKey, authzkeeper.StoreKey, ibcfeetypes.StoreKey, consensusparamtypes.StoreKey, circuittypes.StoreKey, ) @@ -349,6 +357,8 @@ func NewSimApp( scopedFeeMockKeeper := app.CapabilityKeeper.ScopeToModule(MockFeePort) scopedICAMockKeeper := app.CapabilityKeeper.ScopeToModule(ibcmock.ModuleName + icacontrollertypes.SubModuleName) + scopedMockAppKeeper := app.CapabilityKeeper.ScopeToModule(mockapptypes.ModuleName) + // seal capability keeper after scoping modules // Applications that wish to enforce statically created ScopedKeepers should call `Seal` after creating // their scoped modules in `NewApp` with `ScopeToModule` @@ -411,8 +421,12 @@ func NewSimApp( // set the governance module account as the authority for conducting upgrades app.UpgradeKeeper = upgradekeeper.NewKeeper(skipUpgradeHeights, runtime.NewKVStoreService(keys[upgradetypes.StoreKey]), appCodec, 
homePath, app.BaseApp, authtypes.NewModuleAddress(govtypes.ModuleName).String()) + ibcAuthority := authtypes.NewModuleAddress(govtypes.ModuleName).String() + if authority, found := os.LookupEnv("IBC_AUTHORITY"); found { + ibcAuthority = authority + } app.IBCKeeper = ibckeeper.NewKeeper( - appCodec, keys[ibcexported.StoreKey], app.GetSubspace(ibcexported.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper, authtypes.NewModuleAddress(govtypes.ModuleName).String(), + appCodec, keys[ibcexported.StoreKey], app.GetSubspace(ibcexported.ModuleName), app.StakingKeeper, app.UpgradeKeeper, scopedIBCKeeper, ibcAuthority, ) if _, found := os.LookupEnv("USE_MOCK_CLIENT"); found { // this is a workaround in case the counterparty chain uses mock-client @@ -487,6 +501,12 @@ func NewSimApp( authtypes.NewModuleAddress(govtypes.ModuleName).String(), ) + app.MockAppKeeper = mockappkeeper.NewKeeper( + appCodec, keys[mockapptypes.StoreKey], + app.IBCKeeper.ChannelKeeper, app.IBCKeeper.ChannelKeeper, app.IBCKeeper.PortKeeper, + scopedMockAppKeeper, + ) + // Mock Module Stack // Mock Module setup for testing IBC and also acts as the interchain accounts authentication module @@ -506,6 +526,10 @@ func NewSimApp( mockBlockUpgradeMw := ibcmock.NewBlockUpgradeMiddleware(&mockModule, mockBlockUpgradeIBCModule.IBCApp) ibcRouter.AddRoute(ibcmock.MockBlockUpgrade, mockBlockUpgradeMw) + // mockapp Stack + mockAppStack := mockapp.NewIBCModule(app.MockAppKeeper) + ibcRouter.AddRoute(mockapptypes.ModuleName, mockAppStack) + // Create Transfer Stack // SendPacket, since it is originating from the application to core IBC: // transferKeeper.SendPacket -> fee.SendPacket -> channel.SendPacket @@ -616,6 +640,7 @@ func NewSimApp( ica.NewAppModule(&app.ICAControllerKeeper, &app.ICAHostKeeper), ibctm.NewAppModule(), solomachine.NewAppModule(), + mockapp.NewAppModule(app.MockAppKeeper), mockclient.NewAppModule(), mockModule, ) @@ -656,6 +681,7 @@ func NewSimApp( stakingtypes.ModuleName, 
ibcexported.ModuleName, ibctransfertypes.ModuleName, + mockapptypes.ModuleName, genutiltypes.ModuleName, authz.ModuleName, icatypes.ModuleName, @@ -668,6 +694,7 @@ func NewSimApp( stakingtypes.ModuleName, ibcexported.ModuleName, ibctransfertypes.ModuleName, + mockapptypes.ModuleName, capabilitytypes.ModuleName, genutiltypes.ModuleName, feegrant.ModuleName, @@ -688,7 +715,7 @@ func NewSimApp( authtypes.ModuleName, banktypes.ModuleName, distrtypes.ModuleName, stakingtypes.ModuleName, slashingtypes.ModuleName, govtypes.ModuleName, minttypes.ModuleName, crisistypes.ModuleName, - ibcexported.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, + ibcexported.ModuleName, genutiltypes.ModuleName, evidencetypes.ModuleName, authz.ModuleName, ibctransfertypes.ModuleName, mockapptypes.ModuleName, icatypes.ModuleName, ibcfeetypes.ModuleName, ibcmock.ModuleName, feegrant.ModuleName, paramstypes.ModuleName, upgradetypes.ModuleName, vestingtypes.ModuleName, group.ModuleName, consensusparamtypes.ModuleName, circuittypes.ModuleName, } @@ -787,6 +814,7 @@ func NewSimApp( app.ScopedIBCKeeper = scopedIBCKeeper app.ScopedTransferKeeper = scopedTransferKeeper + app.ScopedMockAppKeeper = scopedMockAppKeeper app.ScopedICAControllerKeeper = scopedICAControllerKeeper app.ScopedICAHostKeeper = scopedICAHostKeeper @@ -869,8 +897,15 @@ func (app *SimApp) InitChainer(ctx sdk.Context, req *abci.RequestInitChain) (*ab ibcGenesisState.ClientGenesis.Params.AllowedClients = append( ibcGenesisState.ClientGenesis.Params.AllowedClients, mockclienttypes.Mock) - genesisState[ibcexported.ModuleName] = app.appCodec.MustMarshalJSON(&ibcGenesisState) } + if upgTimeout := os.Getenv("IBC_CHANNEL_UPGRADE_TIMEOUT"); len(upgTimeout) > 0 { + upgTimeoutTimestampNsec, err := strconv.ParseInt(upgTimeout, 10, 64) + if err != nil { + panic(err) + } + ibcGenesisState.ChannelGenesis.Params.UpgradeTimeout.Timestamp = uint64(upgTimeoutTimestampNsec) + } + 
genesisState[ibcexported.ModuleName] = app.appCodec.MustMarshalJSON(&ibcGenesisState) } if err := app.UpgradeKeeper.SetModuleVersionMap(ctx, app.ModuleManager.GetVersionMap()); err != nil {